diff --git a/controllers/windowsmachine_controller.go b/controllers/windowsmachine_controller.go index 0bac2146f..c94cc29ee 100644 --- a/controllers/windowsmachine_controller.go +++ b/controllers/windowsmachine_controller.go @@ -7,8 +7,8 @@ import ( "strings" oconfig "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" - mclient "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" + mclient "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1" "github.com/pkg/errors" core "k8s.io/api/core/v1" k8sapierrors "k8s.io/apimachinery/pkg/api/errors" diff --git a/controllers/windowsmachine_controller_test.go b/controllers/windowsmachine_controller_test.go index 725cb7e37..179c5a6e2 100644 --- a/controllers/windowsmachine_controller_test.go +++ b/controllers/windowsmachine_controller_test.go @@ -4,7 +4,7 @@ import ( "fmt" "testing" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/go.mod b/go.mod index d6d644687..454e0cadf 100644 --- a/go.mod +++ b/go.mod @@ -2,33 +2,28 @@ module github.com/openshift/windows-machine-config-operator go 1.16 -replace ( - // These are coming from the machine-api repo - sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210622023641-c69a3acaee27 - sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210816141152-a7c40345b994 -) - require ( + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/aws/aws-sdk-go v1.38.23 github.com/go-logr/logr v0.4.0 - github.com/openshift/api v0.0.0-20210816181336-8ff39b776da3 - github.com/openshift/client-go v0.0.0-20210730113412-1811c1b3fc0e + github.com/openshift/api v0.0.0-20211103131146-a0807454a876 + github.com/openshift/client-go v0.0.0-20211025111749-96ca2abfc56c github.com/openshift/library-go v0.0.0-20210811133500-5e31383de2a7 - github.com/openshift/machine-api-operator v0.2.1-0.20210820103535-d50698c302f5 github.com/operator-framework/api v0.10.5 github.com/operator-framework/operator-lib v0.4.0 github.com/pkg/errors v0.9.1 github.com/pkg/sftp v1.11.0 github.com/prometheus-operator/prometheus-operator/pkg/client v0.45.0 + github.com/spf13/cobra v1.2.1 // indirect github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 golang.org/x/mod v0.4.2 + golang.org/x/tools v0.1.5 // indirect k8s.io/api v0.22.1 k8s.io/apimachinery v0.22.1 k8s.io/client-go v0.22.1 k8s.io/kubectl v0.22.1 - sigs.k8s.io/cluster-api-provider-aws v0.0.0-00010101000000-000000000000 - sigs.k8s.io/cluster-api-provider-azure v0.0.0-00010101000000-000000000000 + k8s.io/utils v0.0.0-20210802155522-efc7438f0176 // indirect sigs.k8s.io/controller-runtime v0.9.6 ) diff --git a/go.sum b/go.sum index 18f08b09e..04c7ed4ec 100644 --- a/go.sum +++ b/go.sum @@ -39,7 +39,6 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v48.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= @@ -68,7 +67,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= @@ -78,11 +76,9 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -93,10 +89,6 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -105,20 +97,14 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.38.23 h1:lSLWSu2itm9eH45iwiFCdcjFyU7Ec0oS0CNHr+/mVek= github.com/aws/aws-sdk-go v1.38.23/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -133,10 +119,7 @@ github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnweb github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -147,7 +130,6 @@ github.com/chai2010/gettext-go 
v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -156,13 +138,9 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -174,7 +152,6 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -189,33 +166,25 @@ github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= 
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -239,14 +208,10 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -261,18 +226,15 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= @@ -316,7 +278,6 @@ github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pL github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= 
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -327,21 +288,15 @@ github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= -github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -382,9 +337,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= @@ -431,7 +383,6 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -441,18 +392,13 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.4/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v0.0.0-20191024121256-f395758b854c/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -466,9 +412,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -479,7 +423,6 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -490,22 +433,16 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -519,7 +456,6 @@ github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBv github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -544,15 +480,11 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kubernetes-sigs/kube-storage-version-migrator v0.0.0-20191127225502-51849bc15f17/go.mod h1:enH0BVV+4+DAgWdwSlMefG8bBzTfVMTr1lApzdLZ/cc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -564,7 +496,6 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= @@ -597,7 +528,6 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= @@ -610,134 +540,67 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/morikuni/aec v1.0.0/go.mod 
h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/openshift/api v0.0.0-20200326152221-912866ddb162/go.mod h1:RKMJ5CBnljLfnej+BJ/xnOWc3kZDvJUaIAEq2oKSPtE= -github.com/openshift/api v0.0.0-20200424083944-0422dc17083e/go.mod h1:VnbEzX8SAaRj7Yfl836NykdQIlbEjfL6/CD+AaJQg5Q= -github.com/openshift/api v0.0.0-20200827090112-c05698d102cf/go.mod h1:M3xexPhgM8DISzzRpuFUy+jfPjQPIcs9yqEYj17mXV8= -github.com/openshift/api v0.0.0-20200901182017-7ac89ba6b971/go.mod h1:M3xexPhgM8DISzzRpuFUy+jfPjQPIcs9yqEYj17mXV8= -github.com/openshift/api v0.0.0-20210331162552-3e31249e6a55/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= -github.com/openshift/api v0.0.0-20210331193751-3acddb19d360/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= -github.com/openshift/api v0.0.0-20210409143810-a99ffa1cac67/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= -github.com/openshift/api v0.0.0-20210412212256-79bd8cfbbd59/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= -github.com/openshift/api v0.0.0-20210416115537-a60c0dc032fd/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= github.com/openshift/api v0.0.0-20210730095913-85e1d547cdee/go.mod h1:ntkQrC1Z6AxxkhDlVpDVjkD+pzdwVUalWyfH40rSyyM= -github.com/openshift/api v0.0.0-20210816181336-8ff39b776da3 h1:vpZFb+oYWjlhGa5Ni5vk+EGPBwDfzBFc+05HO2EhKhE= -github.com/openshift/api v0.0.0-20210816181336-8ff39b776da3/go.mod h1:x81TFA31x1OMT9SYWukQqJ/KbmeveRN6fo+XeGRK8g0= -github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= -github.com/openshift/build-machinery-go v0.0.0-20200424080330-082bf86082cc/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= -github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/api v0.0.0-20211025104849-a11323ccb6ea/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8= +github.com/openshift/api v0.0.0-20211103131146-a0807454a876 h1:3hzIbKbR7yyVIkNV1kHo6uv6nhgQMpmN4fbWjYOQhzM= +github.com/openshift/api v0.0.0-20211103131146-a0807454a876/go.mod h1:RsQCVJu4qhUawxxDP7pGlwU3IA4F01wYm3qKEu29Su8= 
github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= -github.com/openshift/client-go v0.0.0-20200326155132-2a6cd50aedd0/go.mod h1:uUQ4LClRO+fg5MF/P6QxjMCb1C9f7Oh4RKepftDnEJE= -github.com/openshift/client-go v0.0.0-20200827190008-3062137373b5/go.mod h1:5rGmrkQ8DJEUXA+AR3rEjfH+HFyg4/apY9iCQFgvPfE= -github.com/openshift/client-go v0.0.0-20210331195552-cf6c2669e01f/go.mod h1:hHaRJ6vp2MRd/CpuZ1oJkqnMGy5eEnoAkQmKPZKcUPI= -github.com/openshift/client-go v0.0.0-20210409155308-a8e62c60e930/go.mod h1:uBPbAyIbjMuhPQy4NgF8q1alNGX2qA8bXIkAycsSDc0= -github.com/openshift/client-go v0.0.0-20210730113412-1811c1b3fc0e h1:vhwzeXUxLd6JZlWZ+miBzTEpmVctHyHNq9z43ScYxWI= github.com/openshift/client-go v0.0.0-20210730113412-1811c1b3fc0e/go.mod h1:P1pjphFOgm/nYjmtouHGaSLGtdP25dQICJnYtcYhfEs= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210622023641-c69a3acaee27 h1:qGVJKRO4kDUFkgpw5HSRpOspc+KQCr8Pewv70kfU4oI= -github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210622023641-c69a3acaee27/go.mod h1:H7HAYDpnuhh2UAphBmN0xO2cCfkBR8dkNuXFpVKeW24= -github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210816141152-a7c40345b994 h1:+fIg6L9grNEfuQBi0TrGuNNWvmLfQ9na0TWJBiEbr3U= -github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210816141152-a7c40345b994/go.mod h1:GR+ocB8I+Z7JTSBdO+DMu/diBfH66lRlRpnc1KWysUM= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200701112720-3a7d727c9a10/go.mod h1:wgkZrOlcIMWTzo8khB4Js2PoDJDlIUUdzCBm7BuDdqw= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200713133651-5c8a640669ac/go.mod h1:XVYX9JE339nKbDDa/W481XD+1GTeqeaBm8bDPr7WE7I= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20200901173901-9056dbc8c9b9/go.mod h1:rcwAydGZX+z4l91wtOdbq+fqDwuo6iu0YuFik3UUc+8= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201002065957-9854f7420570/go.mod h1:7NRECVE26rvP1/fs1CbhfY5gsgnnFQNhb9txTFzWmUw= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20201201000827-1117a4fc438c/go.mod h1:21N0wWjiTQypZ7WosEYhcGJHr9JoDR1RBFztE0NvdYM= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210615203611-a02074e8d5bb h1:vFMc7likijMQ1u4yhrjv2JDaK6wrXwC+iP3CaHHG6sE= -github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210615203611-a02074e8d5bb/go.mod h1:KLZ9LTM+EPEny4VXWNbBkJndhfKakmlqafeOfC0jxXA= -github.com/openshift/library-go v0.0.0-20200512120242-21a1ff978534/go.mod h1:2kWwXTkpoQJUN3jZ3QW88EIY1hdRMqxgRs2hheEW/pg= -github.com/openshift/library-go v0.0.0-20200909173121-1d055d971916/go.mod h1:6vwp+YhYOIlj8MpkQKkebTTSn2TuYyvgiAFQ206jIEQ= -github.com/openshift/library-go v0.0.0-20210408164723-7a65fdb398e2/go.mod h1:pnz961veImKsbn7pQcuFbcVpCQosYiC1fUOjzEDeOLU= +github.com/openshift/client-go v0.0.0-20211025111749-96ca2abfc56c h1:2yUcA5LUiKqEnRnqW8MYaB3/7tEAFqCjLtVcbDFlIjg= +github.com/openshift/client-go v0.0.0-20211025111749-96ca2abfc56c/go.mod h1:xigLF97kzy1PZuDsC0Lfu6GlzChRt62+2Ts/nG3sPHY= github.com/openshift/library-go v0.0.0-20210811133500-5e31383de2a7 h1:aCoE+Q7jLvV7MFL2aZOnzO3dK6rpSuEE7273ijnAIWU= github.com/openshift/library-go v0.0.0-20210811133500-5e31383de2a7/go.mod h1:3GagmGg6gikg+hAqma7E7axBzs2pjx4+GrAbdl4OYdY= -github.com/openshift/machine-api-operator v0.2.1-0.20200611014855-9a69f85c32dd/go.mod h1:6vMi+R3xqznBdq5rgeal9N3ak3sOpy50t0fdRCcQXjE= -github.com/openshift/machine-api-operator v0.2.1-0.20200701225707-950912b03628/go.mod h1:cxjy/RUzv5C2T5FNl1KKXUgtakWsezWQ642B/CD9VQA= -github.com/openshift/machine-api-operator 
v0.2.1-0.20200722104429-f4f9b84df9b7/go.mod h1:XDsNRAVEJtkI00e51SAZ/PnqNJl1zv0rHXSdl9L1oOY= -github.com/openshift/machine-api-operator v0.2.1-0.20200926044412-b7d860f8074c/go.mod h1:cp/wPVzxHZeLUjOLkNPNqrk4wyyW6HuHd3Kz9+hl5xw= -github.com/openshift/machine-api-operator v0.2.1-0.20201002104344-6abfb5440597/go.mod h1:+oAfoCl+TUd2TM79/6NdqLpFUHIJpmqkKdmiHe2O7mw= -github.com/openshift/machine-api-operator v0.2.1-0.20210504014029-a132ec00f7dd/go.mod h1:DFZBMPtC2TYZH5NE9+2JQIpbZAnruqc9F26QmbOm9pw= -github.com/openshift/machine-api-operator v0.2.1-0.20210820103535-d50698c302f5 h1:vLt0tE9HQoi7E+oBxz5SLco7DO9roozOHdewOYZzTIA= -github.com/openshift/machine-api-operator v0.2.1-0.20210820103535-d50698c302f5/go.mod h1:ko7xmso6c25h9UL6Ai0I5l+6OgyVf+ebinAYXnwlGNg= -github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/operator-framework/api v0.5.2/go.mod h1:L7IvLd/ckxJEJg/t4oTTlnHKAJIP/p51AvEslW3wYdY= github.com/operator-framework/api v0.10.5 h1:/WvLKOPo8zZMyEmuW0kLC0PJBt4Xal8HZkFioKIxqTA= github.com/operator-framework/api v0.10.5/go.mod h1:tV0BUNvly7szq28ZPBXhjp1Sqg5yHCOeX19ui9K4vjI= github.com/operator-framework/operator-lib v0.4.0 h1:g7tGRo+FikHgFZDmRdHkOxyTv3sViI+Ujiqbfd9Tfsk= github.com/operator-framework/operator-lib v0.4.0/go.mod h1:kOjV7h01DCSw3RZAqYdHyHyVwuJL8hvG53tSZoDZfsQ= -github.com/operator-framework/operator-sdk v0.5.1-0.20190301204940-c2efe6f74e7b/go.mod h1:iVyukRkam5JZa8AnjYf+/G3rk7JI1+M6GsU0sq0B9NA= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI= @@ -752,45 +615,33 @@ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.44.1/g github.com/prometheus-operator/prometheus-operator/pkg/client v0.45.0 h1:2qET5dx1d91irZVhvemXWumYtIiQ+qGBZ0fRtIGrhFA= github.com/prometheus-operator/prometheus-operator/pkg/client v0.45.0/go.mod h1:k4BrWlVQQsvBiTcDnKEMgyh/euRxyxgrHdur/ZX/sdA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -800,7 +651,6 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= @@ -810,12 +660,10 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -841,9 +689,6 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= @@ -864,12 +709,7 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vmware/govmomi v0.22.2/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= -github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= @@ -885,7 +725,6 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -897,8 +736,6 @@ go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVd go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.opencensus.io v0.20.1/go.mod 
h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -922,23 +759,19 @@ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee33 go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= @@ -953,7 +786,6 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1013,7 +845,6 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1036,7 +867,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1091,7 +921,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1118,7 +947,6 @@ golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1143,11 +971,9 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20201018230417-eeed37f84f13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1187,11 +1013,9 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1227,8 +1051,6 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1253,7 +1075,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= 
-golang.org/x/tools v0.0.0-20201020123448-f5c826d1900e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1269,14 +1090,12 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1293,7 +1112,6 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.33.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= @@ -1301,7 +1119,6 @@ google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBz google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1314,7 +1131,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto 
v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1354,13 +1170,10 @@ google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -1403,7 +1216,6 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= @@ -1418,14 +1230,12 @@ gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1440,7 +1250,6 @@ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1450,137 +1259,80 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= k8s.io/api v0.18.0-beta.2/go.mod h1:2oeNnWEqcSmaM/ibSh3t7xcIqbkGXhzZdn4ezV9T4m0= -k8s.io/api v0.18.0-rc.1/go.mod h1:ZOh6SbHjOYyaMLlWmB2+UOQKEWDpCnVEVpEyt7S2J9s= -k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8= k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= -k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.21.0-rc.0/go.mod h1:Dkc/ZauWJrgZhjOjeBgW89xZQiTBJA2RaBKYHXPsi2Y= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= -k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.22.0-rc.0/go.mod h1:EUcKB6RvpW74HMRUSSNwpUzrIHBdGT1FeAvOV+txic0= -k8s.io/api v0.22.0/go.mod h1:0AoXXqst47OI/L0oGKq9DG61dvGRPXs7X4/B7KyjBCU= k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= k8s.io/apiextensions-apiserver v0.18.0-beta.2/go.mod h1:Hnrg5jx8/PbxRbUoqDGxtQkULjwx8FDW4WYJaKNK+fk= k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= -k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= -k8s.io/apiextensions-apiserver v0.19.0/go.mod h1:znfQxNpjqz/ZehvbfMg5N6fvBJW5Lqu5HVLTJQdP4Fs= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= -k8s.io/apiextensions-apiserver v0.21.0-rc.0/go.mod 
h1:ItIoMBJU1gy93Qwr/B2699r4b0VmZqAOU+15BvozxMY= -k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc= k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= -k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= k8s.io/apiextensions-apiserver v0.22.0-rc.0 h1:UtPjFr6a4FckzD+JCSgDEafpcNSnXzdRPpoV7gMWOLI= k8s.io/apiextensions-apiserver v0.22.0-rc.0/go.mod h1:KSr+2VJ6ye8Fy50q7xHZ/Tw8vrRII82KIKbz9eUFmeo= k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= k8s.io/apimachinery v0.18.0-beta.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.0-rc.1/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.21.0-rc.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= -k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.22.0-rc.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= -k8s.io/apimachinery v0.22.0/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= k8s.io/apiserver v0.18.0-beta.2/go.mod h1:bnblMkMoCFnIfVnVftd0SXJPzyvrk3RtaqSbblphF/A= k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= -k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= -k8s.io/apiserver v0.19.0/go.mod h1:XvzqavYj73931x7FLtyagh8WibHpePJ1QwWrSJs2CLk= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.21.0-rc.0/go.mod h1:QlW7+1CZTZtAcKvJ34/n4DIb8sC93FeQpkd1KSU+Sok= -k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= -k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= k8s.io/apiserver v0.22.0-rc.0/go.mod h1:1AfFSkRbaPVFzfSIWd0m/onp49mmAOqXR9qrLJFixlw= -k8s.io/apiserver v0.22.0/go.mod h1:04kaIEzIQrTGJ5syLppQWvpkLJXQtJECHmae+ZGc/nc= -k8s.io/cli-runtime v0.18.0-rc.1/go.mod h1:yuKZYDG8raONmwjwIkT77lCfIuPwX+Bsp88MKYf1TlU= -k8s.io/cli-runtime v0.19.0/go.mod h1:tun9l0eUklT8IHIM0jors17KmUjcrAxn0myoBYwuNuo= -k8s.io/cli-runtime 
v0.21.0/go.mod h1:XoaHP93mGPF37MkLbjGVYqg3S1MnsFdKtiA/RZzzxOo= -k8s.io/cli-runtime v0.22.0/go.mod h1:An6zELQ7udUI0GaXvkuMqyopPA14dIgNqpH8cZu1vig= k8s.io/cli-runtime v0.22.1 h1:WIueieKvT+IiSVSFosRLI6rkM0tyBGEGH1WUEztVjho= k8s.io/cli-runtime v0.22.1/go.mod h1:YqwGrlXeEk15Yn3em2xzr435UGwbrCw5x+COQoTYfoo= k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= k8s.io/client-go v0.18.0-beta.2/go.mod h1:UvuVxHjKWIcgy0iMvF+bwNDW7l0mskTNOaOW1Qv5BMA= -k8s.io/client-go v0.18.0-rc.1/go.mod h1:0lGW/AaaFfNWlmyYvWSJrtaDlti7oNRyCjq4CNK/Ipk= -k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8= k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= -k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= -k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.21.0-rc.0/go.mod h1:zU5HY/bSOKH3YOqoge9nFvICgrpeSdJu8DQ4fkjKIZk= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= -k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/client-go v0.22.0-rc.0/go.mod h1:BZGppBKJh4UtgDZcIIh6vHJsJ1iZiXS7EwKZYWhyklo= -k8s.io/client-go v0.22.0/go.mod h1:GUjIuXR5PiEv/RVK5OODUsm6eZk7wtSWZSaSJbpFdGg= k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw= k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.18.0-beta.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.0-rc.1/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= k8s.io/code-generator v0.18.3/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= -k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg= -k8s.io/code-generator v0.21.0-rc.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/code-generator v0.21.0/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= -k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.22.0-rc.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= -k8s.io/code-generator v0.22.0/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= k8s.io/component-base v0.18.0-beta.2/go.mod h1:HVk5FpRnyzQ/MjBr9//e/yEBjTVa2qjGXCTuUzcD7ks= -k8s.io/component-base v0.18.0-rc.1/go.mod 
h1:NNlRaxZEdLqTs2+6yXiU2SHl8gKsbcy19Ii+Sfq53RM= k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= -k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= -k8s.io/component-base v0.19.0/go.mod h1:dKsY8BxkA+9dZIAh2aWJLL/UdASFDNtGYTCItL4LM7Y= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.21.0-rc.0/go.mod h1:XlP0bM7QJFWRGZYPc5NmphkvsYQ+o7804HWH3GTGjDY= -k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= -k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-base v0.22.0-rc.0/go.mod h1:DKSub/kewg24bK+3ZJ/csu86fSBYpGdYk837eCTvEKg= -k8s.io/component-base v0.22.0/go.mod h1:SXj6Z+V6P6GsBhHZVbWCw9hFjUdUYnJerlhhPnYCBCg= k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo= k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= -k8s.io/component-helpers v0.21.0/go.mod h1:tezqefP7lxfvJyR+0a+6QtVrkZ/wIkyMLK4WcQ3Cj8U= -k8s.io/component-helpers v0.22.0/go.mod h1:YNIbQI59ayNiU8JHlPIxVkOUYycbKhk5Niy0pcyJOEY= k8s.io/component-helpers v0.22.1/go.mod h1:QvBcDbX+qU5I2tMZABBF5fRwAlQwiv771IGBHK9WYh4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -1589,15 +1341,11 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-aggregator v0.18.0-beta.2/go.mod h1:O3Td9mheraINbLHH4pzoFP2gRzG0Wk1COqzdSL4rBPk= -k8s.io/kube-aggregator v0.18.2/go.mod h1:ijq6FnNUoKinA6kKbkN6svdTacSoQVNtKqmQ1+XJEYQ= -k8s.io/kube-aggregator v0.19.0/go.mod h1:1Ln45PQggFAG8xOqWPIYMxUq8WNtpPnYsbUJ39DpF/A= -k8s.io/kube-aggregator v0.21.0-rc.0/go.mod h1:M+whOmsAeQf8ObJ0/eO9Af1Dz2UQEB9OW9BWmt9b2sU= k8s.io/kube-aggregator v0.22.0-rc.0/go.mod h1:g0xtiBSsbMKvewN7xR/Icib4TrHxtvrJcHtYvFsgw7k= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod 
h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= @@ -1607,25 +1355,14 @@ k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAG k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/kubectl v0.18.0-rc.1/go.mod h1:UpG1w7klD633nyMS73/29cNl2tMdEbXU0nWupttyha4= -k8s.io/kubectl v0.19.0/go.mod h1:gPCjjsmE6unJzgaUNXIFGZGafiUp5jh0If3F/x7/rRg= -k8s.io/kubectl v0.21.0/go.mod h1:EU37NukZRXn1TpAkMUoy8Z/B2u6wjHDS4aInsDzVvks= -k8s.io/kubectl v0.22.0/go.mod h1:eeuP92uZbVL2UnOaf0nj9OjtI0hi/nekHU+0isURer0= k8s.io/kubectl v0.22.1 h1:kpXO+ajPNTzAVLDM9pAzCsWH9MtCMr92zpcvXMt7P6E= k8s.io/kubectl v0.22.1/go.mod h1:mjAOgEbMNMtZWxnfM6jd+nPjPsaoLqO5xanc78WcSbw= -k8s.io/metrics v0.18.0-rc.1/go.mod h1:ME3EkXCyiZ7mVFEiAYKBfuo3JkpgggeATG+DBUQby5o= -k8s.io/metrics v0.19.0/go.mod h1:WykpW8B60OeAJx1imdwUgyOID2kDljr/Q+1zrPJ98Wo= -k8s.io/metrics v0.21.0/go.mod h1:L3Ji9EGPP1YBbfm9sPfEXSpnj8i24bfQbAFAsW0NueQ= -k8s.io/metrics v0.22.0/go.mod h1:eYnwafAUNLLpVmY/msoq0RKIKH5C4TzfjKnMZ0Xrt3A= k8s.io/metrics v0.22.1/go.mod h1:i/ZNap89UkV1gLa26dn7fhKAdheJaKy+moOqJbiif7E= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200229041039-0a110f9eb7ab/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200327001022-6496210b90e8/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -1640,38 +1377,22 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.21/go.mod 
h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= -sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= sigs.k8s.io/controller-runtime v0.8.0/go.mod h1:v9Lbj5oX443uR7GXYY46E0EE2o7k2YxQ58GxVNeXSW4= -sigs.k8s.io/controller-runtime v0.9.0-alpha.1.0.20210413130450-7ef2da0bc161/go.mod h1:ufPDuvefw2Y1KnBgHQrLdOjueYlj+XJV2AszbT+WTxs= -sigs.k8s.io/controller-runtime v0.9.0-beta.1.0.20210512131817-ce2f0c92d77e/go.mod h1:ufPDuvefw2Y1KnBgHQrLdOjueYlj+XJV2AszbT+WTxs= sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= -sigs.k8s.io/controller-runtime v0.9.3/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= sigs.k8s.io/controller-runtime v0.9.6 h1:EevVMlgUj4fC1NVM4+DB3iPkWkmGRNarA66neqv9Qew= sigs.k8s.io/controller-runtime v0.9.6/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= -sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= sigs.k8s.io/controller-tools v0.6.0/go.mod h1:baRMVPrctU77F+rfAuH2uPqW93k6yQnZA2dhUOr7ihc= -sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8= -sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= -sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/kustomize/api v0.8.5/go.mod h1:M377apnKT5ZHJS++6H4rQoCHmWtt6qTpp3mbe7p6OLY= sigs.k8s.io/kustomize/api v0.8.11 h1:LzQzlq6Z023b+mBtc6v72N2mSHYmN8x7ssgbf/hv0H8= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= -sigs.k8s.io/kustomize/cmd/config v0.9.7/go.mod h1:MvXCpHs77cfyxRmCNUQjIqCmZyYsbn5PyQpWiq44nW0= sigs.k8s.io/kustomize/cmd/config v0.9.13/go.mod h1:7547FLF8W/lTaDf0BDqFTbZxM9zqwEJqCKN9sSR0xSs= -sigs.k8s.io/kustomize/kustomize/v4 v4.0.5/go.mod h1:C7rYla7sI8EnxHE/xEhRBSHMNfcL91fx0uKmUlUhrBk= sigs.k8s.io/kustomize/kustomize/v4 v4.2.0/go.mod h1:MOkR6fmhwG7hEDRXBYELTi5GSFcLwfqwzTRHW3kv5go= -sigs.k8s.io/kustomize/kyaml v0.10.15/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/kustomize/kyaml v0.11.0 h1:9KhiCPKaVyuPcgOLJXkvytOvjMJLoxpjodiycb4gHsA= sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= @@ -1682,12 +1403,9 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnM sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 
h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/main.go b/main.go index 4be5e2d87..abf810feb 100644 --- a/main.go +++ b/main.go @@ -7,7 +7,7 @@ import ( "os" "strings" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" operators "github.com/operator-framework/api/pkg/operators/v2" "github.com/operator-framework/operator-lib/leader" "github.com/spf13/pflag" diff --git a/test/e2e/clusterinfo/openshift.go b/test/e2e/clusterinfo/openshift.go index bbab42279..b3346bd29 100644 --- a/test/e2e/clusterinfo/openshift.go +++ b/test/e2e/clusterinfo/openshift.go @@ -7,9 +7,9 @@ import ( config "github.com/openshift/api/config/v1" configClient "github.com/openshift/client-go/config/clientset/versioned" imageClient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + mapiClient "github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1" operatorClient "github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1" routeClient "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" - mapiClient "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1" monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" diff --git a/test/e2e/create_test.go b/test/e2e/create_test.go index 2070ec58b..83470dafa 100644 --- a/test/e2e/create_test.go +++ b/test/e2e/create_test.go @@ -10,7 +10,7 @@ import ( "time" config "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/test/e2e/delete_test.go b/test/e2e/delete_test.go index 5ae544737..f22987b6c 100644 --- a/test/e2e/delete_test.go +++ b/test/e2e/delete_test.go @@ -7,7 +7,7 @@ import ( "testing" config "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/test/e2e/providers/aws/aws.go b/test/e2e/providers/aws/aws.go index 7f73d30ef..3f7cd8bd3 100644 --- a/test/e2e/providers/aws/aws.go +++ b/test/e2e/providers/aws/aws.go @@ -15,11 +15,10 @@ import ( "github.com/aws/aws-sdk-go/service/ec2/ec2iface" "github.com/aws/aws-sdk-go/service/iam" config "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" core "k8s.io/api/core/v1" meta 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - awsprovider "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1" "github.com/openshift/windows-machine-config-operator/test/e2e/clusterinfo" ) @@ -344,27 +343,27 @@ func (a *awsProvider) GenerateMachineSet(withWindowsLabel bool, replicas int32) machineLabels[k] = v } - providerSpec := &awsprovider.AWSMachineProviderConfig{ - AMI: awsprovider.AWSResourceReference{ + providerSpec := &mapi.AWSMachineProviderConfig{ + AMI: mapi.AWSResourceReference{ ID: &a.imageID, }, InstanceType: a.instanceType, - IAMInstanceProfile: &awsprovider.AWSResourceReference{ + IAMInstanceProfile: &mapi.AWSResourceReference{ ID: instanceProfile.Name, }, CredentialsSecret: &core.LocalObjectReference{ Name: "aws-cloud-credentials", }, - SecurityGroups: []awsprovider.AWSResourceReference{ + SecurityGroups: []mapi.AWSResourceReference{ { ID: &sgID, }, }, - Subnet: awsprovider.AWSResourceReference{ + Subnet: mapi.AWSResourceReference{ ID: subnet.SubnetId, }, // query placement - Placement: awsprovider.Placement{ + Placement: mapi.Placement{ Region: a.region, AvailabilityZone: *subnet.AvailabilityZone, }, diff --git a/test/e2e/providers/azure/azure.go b/test/e2e/providers/azure/azure.go index 1f1a4dcb8..dc4fa8a6a 100644 --- a/test/e2e/providers/azure/azure.go +++ b/test/e2e/providers/azure/azure.go @@ -6,11 +6,10 @@ import ( "fmt" config "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - azureprovider "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1" "github.com/openshift/windows-machine-config-operator/test/e2e/clusterinfo" ) @@ -48,7 +47,7 @@ func New(clientset *clusterinfo.OpenShift, hasCustomVXLANPort bool) (*Provider, } // newAzureMachineProviderSpec returns an AzureMachineProviderSpec generated from the inputs, or an error -func newAzureMachineProviderSpec(clusterID string, status *config.PlatformStatus, location, zone, vmSize string) (*azureprovider.AzureMachineProviderSpec, error) { +func newAzureMachineProviderSpec(clusterID string, status *config.PlatformStatus, location, zone, vmSize string) (*mapi.AzureMachineProviderSpec, error) { if clusterID == "" { return nil, fmt.Errorf("clusterID is empty") } @@ -64,7 +63,7 @@ func newAzureMachineProviderSpec(clusterID string, status *config.PlatformStatus rg := status.Azure.ResourceGroupName netrg := status.Azure.NetworkResourceGroupName - return &azureprovider.AzureMachineProviderSpec{ + return &mapi.AzureMachineProviderSpec{ TypeMeta: meta.TypeMeta{ APIVersion: "azureproviderconfig.openshift.io/v1beta1", Kind: "AzureMachineProviderSpec", @@ -80,16 +79,16 @@ func newAzureMachineProviderSpec(clusterID string, status *config.PlatformStatus Location: location, Zone: &zone, VMSize: vmSize, - Image: azureprovider.Image{ + Image: mapi.Image{ Publisher: defaultImagePublisher, Offer: defaultImageOffer, SKU: defaultImageSKU, Version: defaultImageVersion, }, - OSDisk: azureprovider.OSDisk{ + OSDisk: mapi.OSDisk{ OSType: "Windows", DiskSizeGB: defaultOSDiskSizeGB, - ManagedDisk: azureprovider.ManagedDiskParameters{ + ManagedDisk: mapi.ManagedDiskParameters{ StorageAccountType: defaultStorageAccountType, }, }, @@ -118,7 +117,7 @@ func (p *Provider) GenerateMachineSet(withWindowsLabel bool, replicas int32) (*m if err != nil { return nil, fmt.Errorf("failed 
to get master-0 machine resource: %v", err) } - masterProviderSpec := new(azureprovider.AzureMachineProviderSpec) + masterProviderSpec := new(mapi.AzureMachineProviderSpec) err = json.Unmarshal(machines.Spec.ProviderSpec.Value.Raw, masterProviderSpec) if err != nil { return nil, fmt.Errorf("failed to unmarshal master-0 azure machine provider spec: %v", err) diff --git a/test/e2e/providers/cloudprovider.go b/test/e2e/providers/cloudprovider.go index 1cef08f8f..30c3cd62b 100644 --- a/test/e2e/providers/cloudprovider.go +++ b/test/e2e/providers/cloudprovider.go @@ -4,7 +4,7 @@ import ( "fmt" config "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" "github.com/pkg/errors" oc "github.com/openshift/windows-machine-config-operator/test/e2e/clusterinfo" diff --git a/test/e2e/providers/vsphere/vsphere.go b/test/e2e/providers/vsphere/vsphere.go index ad71cafc6..ac1c78fae 100644 --- a/test/e2e/providers/vsphere/vsphere.go +++ b/test/e2e/providers/vsphere/vsphere.go @@ -10,8 +10,7 @@ import ( "github.com/pkg/errors" config "github.com/openshift/api/config/v1" - mapi "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" - vsphere "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" + mapi "github.com/openshift/api/machine/v1beta1" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -34,7 +33,7 @@ func New(clientset *clusterinfo.OpenShift) (*Provider, error) { } // newVSphereMachineProviderSpec returns a vSphereMachineProviderSpec generated from the inputs, or an error -func (p *Provider) newVSphereMachineProviderSpec(clusterID string) (*vsphere.VSphereMachineProviderSpec, error) { +func (p *Provider) newVSphereMachineProviderSpec(clusterID string) (*mapi.VSphereMachineProviderSpec, error) { if clusterID == "" { return nil, fmt.Errorf("clusterID is empty") } @@ -53,7 +52,7 @@ func (p *Provider) newVSphereMachineProviderSpec(clusterID string) (*vsphere.VSp log.Printf("creating machineset based on template %s\n", vmTemplate) - return &vsphere.VSphereMachineProviderSpec{ + return &mapi.VSphereMachineProviderSpec{ TypeMeta: meta.TypeMeta{ APIVersion: "vsphereprovider.openshift.io/v1beta1", Kind: "VSphereMachineProviderSpec", @@ -63,8 +62,8 @@ func (p *Provider) newVSphereMachineProviderSpec(clusterID string) (*vsphere.VSp }, DiskGiB: int32(128), MemoryMiB: int64(16384), - Network: vsphere.NetworkSpec{ - Devices: []vsphere.NetworkDeviceSpec{{NetworkName: getNetwork()}}, + Network: mapi.NetworkSpec{ + Devices: []mapi.NetworkDeviceSpec{{NetworkName: getNetwork()}}, }, NumCPUs: int32(4), NumCoresPerSocket: int32(1), @@ -74,7 +73,7 @@ func (p *Provider) newVSphereMachineProviderSpec(clusterID string) (*vsphere.VSp } // getWorkspaceFromExistingMachineSet returns Workspace from a machineset provisioned during installation -func (p *Provider) getWorkspaceFromExistingMachineSet(clusterID string) (*vsphere.Workspace, error) { +func (p *Provider) getWorkspaceFromExistingMachineSet(clusterID string) (*mapi.Workspace, error) { if clusterID == "" { return nil, fmt.Errorf("clusterID is empty") } @@ -93,7 +92,7 @@ func (p *Provider) getWorkspaceFromExistingMachineSet(clusterID string) (*vspher if providerSpecRaw == nil || providerSpecRaw.Raw == nil { return nil, errors.Wrap(err, "no provider spec found") } - var providerSpec vsphere.VSphereMachineProviderSpec + var providerSpec mapi.VSphereMachineProviderSpec err = 
json.Unmarshal(providerSpecRaw.Raw, &providerSpec) if err != nil { return nil, errors.Wrap(err, "unable to unmarshal providerSpec") diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index e27fa52f7..3e27d0e9b 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -112,7 +112,7 @@ spec: - versionHash properties: availableUpdates: - description: availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. + description: availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified. type: array items: description: Release represents an OpenShift release image and associated metadata. @@ -133,6 +133,137 @@ spec: description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. type: string nullable: true + conditionalUpdates: + description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. + type: array + items: + description: ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster. + type: object + required: + - release + - risks + properties: + conditions: + description: 'conditions represents the observations of the conditional update''s current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + type: object + properties: + channels: + description: channels is the set of Cincinnati channels to which the release currently belongs. + type: array + items: + type: string + image: + description: image is a container image location that contains the update. When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version. + type: string + url: + description: url contains information about this release. This URL is set by the 'url' metadata property on a release or the metadata returned by the update API and should be displayed as a link in user interfaces. The URL field may not be set for test or nightly releases. + type: string + version: + description: version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified. + type: string + risks: + description: risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update. + type: array + minItems: 1 + items: + description: ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update. + type: object + required: + - matchingRules + - message + - name + - url + properties: + matchingRules: + description: matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. 
The cluster-version operator will walk the slice in order, and stop after the first it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended. + type: array + minItems: 1 + items: + description: ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate. + type: object + required: + - type + properties: + promql: + description: promQL represents a cluster condition based on PromQL. + type: object + required: + - promql + properties: + promql: + description: PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures. + type: string + type: + description: type represents the cluster-condition type. This defines the members and semantics of any additional properties. + type: string + enum: + - Always + - PromQL + x-kubernetes-list-type: atomic + message: + description: message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + minLength: 1 + name: + description: name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state. + type: string + minLength: 1 + url: + description: url contains information about this risk. + type: string + format: uri + minLength: 1 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-list-type: atomic conditions: description: conditions provides information about the cluster version. The condition "Available" is set to true if the desiredUpdate has been reached. The condition "Progressing" is set to true if an update is being applied. The condition "Degraded" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation. type: array @@ -191,6 +322,9 @@ - state - verified properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets. + type: string completionTime: description: completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update). type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml index 8b6f46eea..246225397 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -43,7 +43,7 @@ spec: description: httpsProxy is the URL of the proxy for HTTPS requests.
Empty means unset and will not result in an env var. type: string noProxy: - description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var. + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. type: string readinessEndpoints: description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml index 609ee1987..3ff78377a 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -73,7 +73,7 @@ spec: - group x-kubernetes-list-type: map profile: - description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: to raise a Red Hat support request, it is required to set this to Default, WriteRequestBodies, or AllRequestBodies to generate audit log events that can be analyzed by support. \n If unset, the 'Default' profile is used as the default." + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. \n If unset, the 'Default' profile is used as the default." 
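A quick illustration of the warning above: tooling that prepares a support case could refuse to proceed while audit logging is disabled cluster-wide. This is only a sketch against the vendored config/v1 types; the AuditProfileType constants (such as NoneAuditProfileType) are assumed from that package and the check is not part of this operator.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// auditDisabled reports whether the cluster-wide APIServer configuration
// turns audit logging off entirely (the 'None' profile), which the warning
// above advises against because it can hamper later support investigations.
func auditDisabled(apiserver *configv1.APIServer) bool {
	return apiserver != nil && apiserver.Spec.Audit.Profile == configv1.NoneAuditProfileType
}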
type: string default: Default enum: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml index 442d39a65..f67be27db 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -61,7 +61,7 @@ spec: description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. type: string noProxy: - description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var. + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. type: string readinessEndpoints: description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. @@ -168,7 +168,7 @@ spec: description: httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var. type: string noProxy: - description: noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var. + description: noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var. type: string readinessEndpoints: description: readinessEndpoints is a list of endpoints used to verify readiness of the proxy. diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml new file mode 100644 index 000000000..147c73c44 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_imagecontentpolicy.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/874 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: imagecontentpolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImageContentPolicy + listKind: ImageContentPolicyList + plural: imagecontentpolicies + singular: imagecontentpolicy + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations. The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull image from mirrors by tags, should set the \"allowMirrorByTags\". \n Each “source” repository is treated independently; configurations for different “source” repositories don’t interact. \n If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. \n When multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified." + type: array + items: + description: RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. + type: object + required: + - source + properties: + allowMirrorByTags: + description: allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue. + type: boolean + mirrors: + description: mirrors is zero or more repositories that may also contain the same images. If the "mirrors" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering. + type: array + items: + type: string + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + x-kubernetes-list-type: set + source: + description: source is the repository that users refer to, e.g. in image pull specifications. 
+ type: string + pattern: ^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$ + x-kubernetes-list-map-keys: + - source + x-kubernetes-list-type: map + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml index 63eefa341..435492290 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -49,6 +49,9 @@ spec: description: platformSpec holds desired information specific to the underlying infrastructure provider. type: object properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object aws: description: AWS contains settings specific to the Amazon Web Services infrastructure provider. type: object @@ -92,8 +95,11 @@ spec: ovirt: description: Ovirt contains settings specific to the oVirt infrastructure provider. type: object + powervs: + description: PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + type: object type: - description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. type: string enum: - "" @@ -109,6 +115,8 @@ spec: - IBMCloud - KubeVirt - EquinixMetal + - PowerVS + - AlibabaCloud vsphere: description: VSphere contains settings specific to the VSphere infrastructure provider. type: object @@ -160,10 +168,51 @@ spec: - IBMCloud - KubeVirt - EquinixMetal + - PowerVS + - AlibabaCloud platformStatus: description: platformStatus holds status information specific to the underlying infrastructure provider. type: object properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + required: + - region + - resourceGroupID + properties: + region: + description: region specifies the region for Alibaba Cloud resources created for the cluster. + type: string + pattern: ^[0-9A-Za-z-]+$ + resourceGroupID: + description: resourceGroupID is the ID of the resource group for the cluster. 
+ type: string + pattern: ^rg-[0-9A-Za-z]+$ + resourceTags: + description: resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + type: array + maxItems: 20 + items: + description: AlibabaCloudResourceTag is the set of tags to add to apply to resources. + type: object + required: + - key + - value + properties: + key: + description: key is the key of the tag. + type: string + maxLength: 128 + minLength: 1 + value: + description: value is the value of the tag. + type: string + maxLength: 128 + minLength: 1 + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map aws: description: AWS contains settings specific to the Amazon Web Services infrastructure provider. type: object @@ -320,8 +369,40 @@ spec: nodeDNSIP: description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' type: string + powervs: + description: PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + type: object + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + region: + description: region holds the default Power VS region for new Power VS resources created by the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + type: array + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + type: object + required: + - name + - url + properties: + name: + description: name is the name of the Power VS service. + type: string + pattern: ^[a-z0-9-]+$ + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + type: string + format: uri + pattern: ^https:// + zone: + description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' + type: string type: - description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." + description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\" and \"None\". 
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." type: string enum: - "" @@ -337,6 +418,8 @@ spec: - IBMCloud - KubeVirt - EquinixMetal + - PowerVS + - AlibabaCloud vsphere: description: VSphere contains settings specific to the VSphere infrastructure provider. type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml index 1145677c9..95fe8dfd9 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -82,6 +82,8 @@ spec: type: array items: type: object + required: + - domainPatterns properties: domainPatterns: description: "domainPatterns is a list of domains for which the desired HSTS annotations are required. If domainPatterns is specified and a route is created with a spec.host matching one of the domains, the route must specify the HSTS Policy components described in the matching RequiredHSTSPolicy. \n The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*." diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml index 1f75769db..883c623b3 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -295,16 +295,27 @@ spec: type: array items: type: string + x-kubernetes-list-type: atomic + groups: + description: groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used. + type: array + items: + description: OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo responses + type: string + minLength: 1 + x-kubernetes-list-type: atomic name: description: name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity type: array items: type: string + x-kubernetes-list-type: atomic preferredUsername: description: preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim type: array items: type: string + x-kubernetes-list-type: atomic clientID: description: clientID is the oauth client ID type: string @@ -377,6 +388,7 @@ spec: type: description: type identifies the identity provider type for this entry. type: string + x-kubernetes-list-type: atomic templates: description: templates allow you to customize pages like the login page. type: object @@ -413,7 +425,7 @@ spec: type: object properties: accessTokenInactivityTimeout: - description: accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. 
The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as "5m", "1.5h" or "2h45m". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. + description: "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime. \n WARNING: existing tokens' timeout will not be affected (lowered) by changing this value" type: string accessTokenInactivityTimeoutSeconds: description: 'accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.' diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 35eace370..284d06f9a 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -64,6 +64,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ProxyList{}, &Scheduler{}, &SchedulerList{}, + &ImageContentPolicy{}, + &ImageContentPolicyList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1/types.go b/vendor/github.com/openshift/api/config/v1/types.go index 09a9e2070..56d00648e 100644 --- a/vendor/github.com/openshift/api/config/v1/types.go +++ b/vendor/github.com/openshift/api/config/v1/types.go @@ -324,6 +324,7 @@ type RequiredHSTSPolicy struct { // The use of wildcards is allowed like this: *.foo.com matches everything under foo.com. // foo.com only matches foo.com, so to cover foo.com and everything under it, you must specify *both*. // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:Required // +required DomainPatterns []string `json:"domainPatterns"` @@ -332,7 +333,6 @@ type RequiredHSTSPolicy struct { // If set to 0 and includeSubdomains is specified, all subdomains of the host are also removed as HSTS hosts. // maxAge is a time-to-live value, and if this policy is not refreshed on a client, the HSTS // policy will eventually expire on that client. - // +required MaxAge MaxAgePolicy `json:"maxAge"` // preloadPolicy directs the client to include hosts in its host preload list so that diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 12c009069..31801aacf 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -97,9 +97,10 @@ type Audit struct { // HTTP payloads for read requests (get, list). 
// - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. // - // Warning: to raise a Red Hat support request, it is required to set this to Default, - // WriteRequestBodies, or AllRequestBodies to generate audit log events that can be - // analyzed by support. + // Warning: It is not recommended to disable audit logging by using the `None` profile unless you + // are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. + // If you disable audit logging and a support situation arises, you might need to enable audit logging + // and reproduce the issue in order to troubleshoot properly. // // If unset, the 'Default' profile is used as the default. // diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index ba681e658..bbe359679 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -143,43 +143,50 @@ type ClusterOperatorStatusCondition struct { type ClusterStatusConditionType string const ( - // Available indicates that the operand (eg: openshift-apiserver for the - // openshift-apiserver-operator), is functional and available in the cluster. - // Available=False means at least part of the component is non-functional, - // and that the condition requires immediate administrator intervention. + // Available indicates that the component (operator and all configured operands) + // is functional and available in the cluster. Available=False means at least + // part of the component is non-functional, and that the condition requires + // immediate administrator intervention. OperatorAvailable ClusterStatusConditionType = "Available" - // Progressing indicates that the operator is actively rolling out new code, - // propagating config changes, or otherwise moving from one steady state to - // another. Operators should not report progressing when they are reconciling - // (without action) a previously known state. If the observed cluster state - // has changed and the operator/operand is reacting to it (scaling up for instance), - // Progressing should become true since it is moving from one steady state to - // another. + // Progressing indicates that the component (operator and all configured operands) + // is actively rolling out new code, propagating config changes, or otherwise + // moving from one steady state to another. Operators should not report + // progressing when they are reconciling (without action) a previously known + // state. If the observed cluster state has changed and the component is + // reacting to it (scaling up for instance), Progressing should become true + // since it is moving from one steady state to another. OperatorProgressing ClusterStatusConditionType = "Progressing" - // Degraded indicates that the operator's current state does not match its - // desired state over a period of time resulting in a lower quality of service. - // The period of time may vary by component, but a Degraded state represents - // persistent observation of a condition. As a result, a component should not - // oscillate in and out of Degraded state. A service may be Available even - // if its degraded. For example, your service may desire 3 running pods, but 1 - // pod is crash-looping. The service is Available but Degraded because it - // may have a lower quality of service. 
A component may be Progressing but - // not Degraded because the transition from one state to another does not - // persist over a long enough period to report Degraded. A service should not - // report Degraded during the course of a normal upgrade. A service may report - // Degraded in response to a persistent infrastructure failure that requires - // eventual administrator intervention. For example, if a control plane host - // is unhealthy and must be replaced. An operator should report Degraded if - // unexpected errors occur over a period, but the expectation is that all - // unexpected errors are handled as operators mature. + // Degraded indicates that the component (operator and all configured operands) + // does not match its desired state over a period of time resulting in a lower + // quality of service. The period of time may vary by component, but a Degraded + // state represents persistent observation of a condition. As a result, a + // component should not oscillate in and out of Degraded state. A component may + // be Available even if its degraded. For example, a component may desire 3 + // running pods, but 1 pod is crash-looping. The component is Available but + // Degraded because it may have a lower quality of service. A component may be + // Progressing but not Degraded because the transition from one state to + // another does not persist over a long enough period to report Degraded. A + // component should not report Degraded during the course of a normal upgrade. + // A component may report Degraded in response to a persistent infrastructure + // failure that requires eventual administrator intervention. For example, if + // a control plane host is unhealthy and must be replaced. A component should + // report Degraded if unexpected errors occur over a period, but the + // expectation is that all unexpected errors are handled as operators mature. OperatorDegraded ClusterStatusConditionType = "Degraded" - // Upgradeable indicates whether the operator safe to upgrade based on the current cluster state. When status is `False` - // administrators should not upgrade their cluster and the message field should contain a human readable description - // of what the administrator should do to allow the operator to successfully update. A missing condition, True, - // and Unknown are all treated by the CVO as allowing an upgrade. + // Upgradeable indicates whether the component (operator and all configured + // operands) is safe to upgrade based on the current cluster state. When + // Upgradeable is False, the cluster-version operator will prevent the + // cluster from performing impacted updates unless forced. When set on + // ClusterVersion, the message will explain which updates (minor or patch) + // are impacted. When set on ClusterOperator, False will block minor + // OpenShift updates. The message field should contain a human readable + // description of what the administrator should do to allow the cluster or + // component to successfully update. The cluster-version operator will + // allow updates when this condition is not False, including when it is + // missing, True, or Unknown. 
OperatorUpgradeable ClusterStatusConditionType = "Upgradeable" ) diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 634efaa3e..d23f8ff3c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -123,14 +123,26 @@ type ClusterVersionStatus struct { // +optional Conditions []ClusterOperatorStatusCondition `json:"conditions,omitempty"` - // availableUpdates contains the list of updates that are appropriate - // for this cluster. This list may be empty if no updates are recommended, - // if the update service is unavailable, or if an invalid channel has - // been specified. + // availableUpdates contains updates recommended for this + // cluster. Updates which appear in conditionalUpdates but not in + // availableUpdates may expose this cluster to known issues. This list + // may be empty if no updates are recommended, if the update service + // is unavailable, or if an invalid channel has been specified. // +nullable // +kubebuilder:validation:Required // +required AvailableUpdates []Release `json:"availableUpdates"` + + // conditionalUpdates contains the list of updates that may be + // recommended for this cluster if it meets specific required + // conditions. Consumers interested in the set of updates that are + // actually recommended for this cluster should use + // availableUpdates. This list may be empty if no updates are + // recommended, if the update service is unavailable, or if an empty + // or invalid channel has been specified. + // +listType=atomic + // +optional + ConditionalUpdates []ConditionalUpdate `json:"conditionalUpdates,omitempty"` } // UpdateState is a constant representing whether an update was successfully @@ -190,6 +202,14 @@ type UpdateHistory struct { // +kubebuilder:validation:Required // +required Verified bool `json:"verified"` + + // acceptedRisks records risks which were accepted to initiate the update. + // For example, it may menition an Upgradeable=False or missing signature + // that was overriden via desiredUpdate.force, or an update that was + // initiated despite not being in the availableUpdates set of recommended + // update targets. + // +optional + AcceptedRisks string `json:"acceptedRisks,omitempty"` } // ClusterID is string RFC4122 uuid. @@ -290,6 +310,112 @@ type Release struct { // availableUpdates field is accurate and recent. const RetrievedUpdates ClusterStatusConditionType = "RetrievedUpdates" +// ConditionalUpdate represents an update which is recommended to some +// clusters on the version the current cluster is reconciling, but which +// may not be recommended for the current cluster. +type ConditionalUpdate struct { + // release is the target of the update. + // +kubebuilder:validation:Required + // +required + Release Release `json:"release"` + + // risks represents the range of issues associated with + // updating to the target release. The cluster-version + // operator will evaluate all entries, and only recommend the + // update if there is at least one entry and all entries + // recommend the update. 
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=map + // +listMapKey=name + // +required + Risks []ConditionalUpdateRisk `json:"risks" patchStrategy:"merge" patchMergeKey:"name"` + + // conditions represents the observations of the conditional update's + // current status. Known types are: + // * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. + // * Recommended, for whether the update is recommended for the current cluster. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// ConditionalUpdateRisk represents a reason and cluster-state +// for not recommending a conditional update. +// +k8s:deepcopy-gen=true +type ConditionalUpdateRisk struct { + // url contains information about this risk. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:MinLength=1 + // +required + URL string `json:"url"` + + // name is the CamelCase reason for not recommending a + // conditional update, in the event that matchingRules match the + // cluster state. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Name string `json:"name"` + + // message provides additional information about the risk of + // updating, in the event that matchingRules match the cluster + // state. This is only to be consumed by humans. It may + // contain Line Feed characters (U+000A), which should be + // rendered as new lines. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Message string `json:"message"` + + // matchingRules is a slice of conditions for deciding which + // clusters match the risk and which do not. The slice is + // ordered by decreasing precedence. The cluster-version + // operator will walk the slice in order, and stop after the + // first it can successfully evaluate. If no condition can be + // successfully evaluated, the update will not be recommended. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +listType=atomic + // +required + MatchingRules []ClusterCondition `json:"matchingRules"` +} + +// ClusterCondition is a union of typed cluster conditions. The 'type' +// property determines which of the type-specific properties are relevant. +// When evaluated on a cluster, the condition may match, not match, or +// fail to evaluate. +// +k8s:deepcopy-gen=true +type ClusterCondition struct { + // type represents the cluster-condition type. This defines + // the members and semantics of any additional properties. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum={"Always","PromQL"} + // +required + Type string `json:"type"` + + // promQL represents a cluster condition based on PromQL. + // +optional + PromQL *PromQLClusterCondition `json:"promql,omitempty"` +} + +// PromQLClusterCondition represents a cluster condition based on PromQL. +type PromQLClusterCondition struct { + // PromQL is a PromQL query classifying clusters. This query + // query should return a 1 in the match case and a 0 in the + // does-not-match case case. Queries which return no time + // series, or which return values besides 0 or 1, are + // evaluation failures. 
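To make the recommendation semantics above concrete, here is a minimal sketch, not the cluster-version operator's actual code, of how risks and matchingRules are meant to combine: the update is only recommended when there is at least one risk and every risk can be evaluated and does not match the cluster. The evalPromQL callback is hypothetical and stands in for running the classifying query against the cluster's monitoring stack.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// recommended reports whether a conditional update target should be
// recommended for this cluster: there must be at least one risk, every risk
// must be successfully evaluated, and none of them may match the cluster.
func recommended(update configv1.ConditionalUpdate, evalPromQL func(query string) (bool, error)) bool {
	if len(update.Risks) == 0 {
		return false
	}
	for _, risk := range update.Risks {
		matched, evaluated := false, false
		// matchingRules is ordered by decreasing precedence: stop at the
		// first rule that evaluates successfully.
		for _, rule := range risk.MatchingRules {
			switch rule.Type {
			case "Always":
				matched, evaluated = true, true
			case "PromQL":
				if rule.PromQL == nil {
					continue
				}
				if m, err := evalPromQL(rule.PromQL.PromQL); err == nil {
					matched, evaluated = m, true
				}
			}
			if evaluated {
				break
			}
		}
		// An unevaluable risk, or a risk that matches the cluster, blocks
		// the recommendation.
		if !evaluated || matched {
			return false
		}
	}
	return true
}

In practice, most consumers would simply read the Recommended condition that the operator publishes under conditionalUpdates[].conditions instead of re-evaluating the rules themselves.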
+ // +kubebuilder:validation:Required + // +required + PromQL string `json:"promql"` +} + // ClusterVersionList is a list of ClusterVersion resources. // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 2f0dce3b5..2e98aba56 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -111,13 +111,19 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ Disabled: []string{}, }, TechPreviewNoUpgrade: newDefaultFeatures(). - with("CSIDriverAzureDisk"). // sig-storage, jsafrane, OCP specific - with("CSIDriverVSphere"). // sig-storage, jsafrane, OCP specific - with("CSIMigrationAWS"). // sig-storage, jsafrane, Kubernetes feature gate - with("CSIMigrationOpenStack"). // sig-storage, jsafrane, Kubernetes feature gate - with("CSIMigrationGCE"). // sig-storage, fbertina, Kubernetes feature gate - with("CSIMigrationAzureDisk"). // sig-storage, fbertina, Kubernetes feature gate - with("ExternalCloudProvider"). // sig-cloud-provider, jspeed, OCP specific + with("CSIDriverAzureDisk"). // sig-storage, jsafrane, OCP specific + with("CSIDriverAzureFile"). // sig-storage, fbertina, OCP specific + with("CSIDriverVSphere"). // sig-storage, jsafrane, OCP specific + with("CSIMigrationAWS"). // sig-storage, jsafrane, Kubernetes feature gate + with("CSIMigrationOpenStack"). // sig-storage, jsafrane, Kubernetes feature gate + with("CSIMigrationGCE"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationAzureDisk"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationAzureFile"). // sig-storage, fbertina, Kubernetes feature gate + with("CSIMigrationvSphere"). // sig-storage, fbertina, Kubernetes feature gate + with("ExternalCloudProvider"). // sig-cloud-provider, jspeed, OCP specific + with("InsightsOperatorPullingSCA"). // insights-operator/ccx, tremes, OCP specific + with("CSIDriverSharedResource"). // sig-build, adkaplan, OCP specific + with("BuildCSIVolumes"). // sig-build, adkaplan, OCP specific toFeatures(), LatencySensitive: newDefaultFeatures(). with( @@ -139,6 +145,7 @@ var defaultFeatures = &FeatureGateEnabledDisabled{ "NodeDisruptionExclusion", // sig-scheduling, ccoleman "ServiceNodeExclusion", // sig-scheduling, ccoleman "DownwardAPIHugePages", // sig-node, rphillips + "PodSecurity", // sig-auth, s-urbaniak }, Disabled: []string{ "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go new file mode 100644 index 000000000..8ccad9c53 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -0,0 +1,89 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. +// When multiple policies are defined, the outcome of the behavior is defined on each field. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
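Before the new ImageContentPolicy type below, one note on the TechPreviewNoUpgrade additions above: the exported FeatureSets map can be queried to see whether a gate such as CSIDriverAzureFile is enabled for a given feature set. A small sketch, not specific to this operator:

package sketch

import configv1 "github.com/openshift/api/config/v1"

// gateEnabled reports whether the named feature gate appears in the Enabled
// list of the given feature set, e.g. gateEnabled(configv1.TechPreviewNoUpgrade,
// "CSIDriverAzureFile") after this bump.
func gateEnabled(set configv1.FeatureSet, gate string) bool {
	features, ok := configv1.FeatureSets[set]
	if !ok || features == nil {
		return false
	}
	for _, enabled := range features.Enabled {
		if enabled == gate {
			return true
		}
	}
	return false
}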
+// +openshift:compatibility-gen:level=1 +type ImageContentPolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ImageContentPolicySpec `json:"spec"` +} + +// ImageContentPolicySpec is the specification of the ImageContentPolicy CRD. +type ImageContentPolicySpec struct { + // repositoryDigestMirrors allows images referenced by image digests in pods to be + // pulled from alternative mirrored repository locations. The image pull specification + // provided to the pod will be compared to the source locations described in RepositoryDigestMirrors + // and the image may be pulled down from any of the mirrors in the list instead of the + // specified repository allowing administrators to choose a potentially faster mirror. + // To pull image from mirrors by tags, should set the "allowMirrorByTags". + // + // Each “source” repository is treated independently; configurations for different “source” + // repositories don’t interact. + // + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. + // + // When multiple policies are defined for the same “source” repository, the sets of defined + // mirrors will be merged together, preserving the relative order of the mirrors, if possible. + // For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the + // mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict + // (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified. + // +optional + // +listType=map + // +listMapKey=source + RepositoryDigestMirrors []RepositoryDigestMirrors `json:"repositoryDigestMirrors"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageContentPolicyList lists the items in the ImageContentPolicy CRD. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageContentPolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + + Items []ImageContentPolicy `json:"items"` +} + +// RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config. +type RepositoryDigestMirrors struct { + // source is the repository that users refer to, e.g. in image pull specifications. + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` + Source string `json:"source"` + // allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. + // Pulling images by tag can potentially yield different images, depending on which endpoint + // we pull from. Forcing digest-pulls for mirrors avoids that issue. + // +optional + AllowMirrorByTags bool `json:"allowMirrorByTags,omitempty"` + // mirrors is zero or more repositories that may also contain the same images. + // If the "mirrors" is not specified, the image will continue to be pulled from the specified + // repository in the pull spec. 
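The merge behaviour described above for repositoryDigestMirrors (mirrors a, b, c from one policy plus c, d, e from another yield a, b, c, d, e) can be pictured with a short sketch. It uses the Mirror string type this file defines a little further down and is illustrative only, not the runtime's actual merging code:

package sketch

import configv1 "github.com/openshift/api/config/v1"

// mergeMirrors combines the mirror lists of several policies that share the
// same source, preserving each list's relative order and dropping duplicates,
// so {a, b, c} merged with {c, d, e} becomes {a, b, c, d, e}.
func mergeMirrors(policies ...[]configv1.Mirror) []configv1.Mirror {
	seen := map[configv1.Mirror]struct{}{}
	var merged []configv1.Mirror
	for _, mirrors := range policies {
		for _, m := range mirrors {
			if _, dup := seen[m]; !dup {
				seen[m] = struct{}{}
				merged = append(merged, m)
			}
		}
	}
	return merged
}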
No mirror will be configured. + // The order of mirrors in this list is treated as the user's desired priority, while source + // is by default considered lower priority than all mirrors. Other cluster configuration, + // including (but not limited to) other repositoryDigestMirrors objects, + // may impact the exact order mirrors are contacted in, or some mirrors may be contacted + // in parallel, so this should be considered a preference rather than a guarantee of ordering. + // +optional + // +listType=set + Mirrors []Mirror `json:"mirrors,omitempty"` +} + +// +kubebuilder:validation:Pattern=`^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])(:[0-9]+)?(\/[^\/:\n]+)*(\/[^\/:\n]+((:[^\/:\n]+)|(@[^\n]+)))?$` +type Mirror string diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 131e26ba7..4863a98e7 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -124,7 +124,7 @@ const ( ) // PlatformType is a specific supported infrastructure provider. -// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal +// +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud type PlatformType string const ( @@ -163,6 +163,12 @@ const ( // EquinixMetalPlatformType represents Equinix Metal infrastructure. EquinixMetalPlatformType PlatformType = "EquinixMetal" + + // PowerVSPlatformType represents IBM Power Systems Virtual Servers infrastructure. + PowerVSPlatformType PlatformType = "PowerVS" + + // AlibabaCloudPlatformType represents Alibaba Cloud infrastructure. + AlibabaCloudPlatformType PlatformType = "AlibabaCloud" ) // IBMCloudProviderType is a specific supported IBM Cloud provider cluster type @@ -185,9 +191,9 @@ type PlatformSpec struct { // balancers, dynamic volume provisioning, machine creation and deletion, and // other integrations are enabled. If None, no infrastructure automation is // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", and "None". Individual components may not support - // all platforms, and must handle unrecognized platforms as None if they do - // not support that platform. + // "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", + // "AlibabaCloud" and "None". Individual components may not support all platforms, + // and must handle unrecognized platforms as None if they do not support that platform. // // +unionDiscriminator Type PlatformType `json:"type"` @@ -231,6 +237,14 @@ type PlatformSpec struct { // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` + + // PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + // +optional + PowerVS *PowerVSPlatformSpec `json:"powervs,omitempty"` + + // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. 
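The platform descriptions above repeat one rule that is easy to miss: a component that does not support a platform must treat it, and any value it does not recognize, as None. A minimal sketch of that guidance for a hypothetical component that only handles a few providers:

package sketch

import configv1 "github.com/openshift/api/config/v1"

// effectivePlatform maps the reported infrastructure platform onto the set a
// hypothetical component supports, falling back to None for everything it
// does not recognize or support (including the new PowerVS and AlibabaCloud
// values when the component has no specific handling for them).
func effectivePlatform(status *configv1.PlatformStatus) configv1.PlatformType {
	if status == nil {
		return configv1.NonePlatformType
	}
	switch status.Type {
	case configv1.AWSPlatformType, configv1.AzurePlatformType, configv1.GCPPlatformType:
		return status.Type // platforms this component handles explicitly
	default:
		return configv1.NonePlatformType
	}
}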
+ // +optional + AlibabaCloud *AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` } // PlatformStatus holds the current status specific to the underlying infrastructure provider @@ -242,9 +256,9 @@ type PlatformStatus struct { // balancers, dynamic volume provisioning, machine creation and deletion, and // other integrations are enabled. If None, no infrastructure automation is // enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", - // "OpenStack", "VSphere", "oVirt", "EquinixMetal", and "None". Individual components may not support - // all platforms, and must handle unrecognized platforms as None if they do - // not support that platform. + // "OpenStack", "VSphere", "oVirt", "EquinixMetal", "PowerVS", "AlibabaCloud" and "None". + // Individual components may not support all platforms, and must handle + // unrecognized platforms as None if they do not support that platform. // // This value will be synced with to the `status.platform` and `status.platformStatus.type`. // Currently this value cannot be changed once set. @@ -289,6 +303,14 @@ type PlatformStatus struct { // EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. // +optional EquinixMetal *EquinixMetalPlatformStatus `json:"equinixMetal,omitempty"` + + // PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + // +optional + PowerVS *PowerVSPlatformStatus `json:"powervs,omitempty"` + + // AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + // +optional + AlibabaCloud *AlibabaCloudPlatformStatus `json:"alibabaCloud,omitempty"` } // AWSServiceEndpoint store the configuration of a custom url to @@ -573,6 +595,89 @@ type EquinixMetalPlatformStatus struct { IngressIP string `json:"ingressIP,omitempty"` } +// PowervsServiceEndpoint stores the configuration of a custom url to +// override existing defaults of PowerVS Services. +type PowerVSServiceEndpoint struct { + // name is the name of the Power VS service. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9-]+$` + Name string `json:"name"` + + // url is fully qualified URI with scheme https, that overrides the default generated + // endpoint for a client. + // This must be provided and cannot be empty. + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=uri + // +kubebuilder:validation:Pattern=`^https://` + URL string `json:"url"` +} + +// PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. +// This only includes fields that can be modified in the cluster. +type PowerVSPlatformSpec struct{} + +// PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastrucutre provider. +type PowerVSPlatformStatus struct { + // region holds the default Power VS region for new Power VS resources created by the cluster. + Region string `json:"region"` + + // zone holds the default zone for the new Power VS resources created by the cluster. + // Note: Currently only single-zone OCP clusters are supported + Zone string `json:"zone"` + + // serviceEndpoints is a list of custom endpoints which will override the default + // service endpoints of a Power VS service. 
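A sketch of how a consumer might honour the serviceEndpoints overrides just described: look the Power VS service name up in the status list and fall back to a default otherwise. The defaultURL parameter is hypothetical; real defaults live in the individual cloud clients.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// endpointFor returns the override URL for the named Power VS service if one
// is configured in PowerVSPlatformStatus.ServiceEndpoints, and defaultURL
// otherwise.
func endpointFor(status *configv1.PowerVSPlatformStatus, service, defaultURL string) string {
	if status == nil {
		return defaultURL
	}
	for _, ep := range status.ServiceEndpoints {
		if ep.Name == service {
			return ep.URL
		}
	}
	return defaultURL
}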
+ // +optional + ServiceEndpoints []PowerVSServiceEndpoint `json:"serviceEndpoints,omitempty"` + + // CISInstanceCRN is the CRN of the Cloud Internet Services instance managing + // the DNS zone for the cluster's base domain + CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` +} + +// AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. +// This only includes fields that can be modified in the cluster. +type AlibabaCloudPlatformSpec struct{} + +// AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider. +type AlibabaCloudPlatformStatus struct { + // region specifies the region for Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[0-9A-Za-z-]+$` + // +required + Region string `json:"region"` + // resourceGroupID is the ID of the resource group for the cluster. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^rg-[0-9A-Za-z]+$` + // +required + ResourceGroupID string `json:"resourceGroupID"` + // resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + // +kubebuilder:validation:MaxItems=20 + // +listType=map + // +listMapKey=key + // +optional + ResourceTags []AlibabaCloudResourceTag `json:"resourceTags,omitempty"` +} + +// AlibabaCloudResourceTag is the set of tags to add to apply to resources. +type AlibabaCloudResourceTag struct { + // key is the key of the tag. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Key string `json:"key"` + // value is the value of the tag. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Value string `json:"value"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // InfrastructureList is diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index 5b5849b65..02fbbf9d4 100644 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -31,6 +31,7 @@ type OAuthSpec struct { // identityProviders is an ordered list of ways for a user to identify themselves. // When this list is empty, no identities are provisioned for users. // +optional + // +listType=atomic IdentityProviders []IdentityProvider `json:"identityProviders,omitempty"` // tokenConfig contains options for authorization and access tokens @@ -66,6 +67,8 @@ type TokenConfig struct { // per client, then that value takes precedence. If the timeout value is // not specified and the client does not override the value, then tokens // are valid until their lifetime. + // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value // +optional AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"` } @@ -537,22 +540,37 @@ type OpenIDIdentityProvider struct { // iss Claim and the sub Claim." 
const UserIDClaim = "sub" +// OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo +// responses +// +kubebuilder:validation:MinLength=1 +type OpenIDClaim string + // OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider type OpenIDClaims struct { // preferredUsername is the list of claims whose values should be used as the preferred username. // If unspecified, the preferred username is determined from the value of the sub claim + // +listType=atomic // +optional PreferredUsername []string `json:"preferredUsername,omitempty"` // name is the list of claims whose values should be used as the display name. Optional. // If unspecified, no display name is set for the identity + // +listType=atomic // +optional Name []string `json:"name,omitempty"` // email is the list of claims whose values should be used as the email address. Optional. // If unspecified, no email is set for the identity + // +listType=atomic // +optional Email []string `json:"email,omitempty"` + + // groups is the list of claims value of which should be used to synchronize groups + // from the OIDC provider to OpenShift for the user. + // If multiple claims are specified, the first one with a non-empty value is used. + // +listType=atomic + // +optional + Groups []OpenIDClaim `json:"groups,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 35e78bb69..01ee4690d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -35,7 +35,7 @@ type ProxySpec struct { // +optional HTTPSProxy string `json:"httpsProxy,omitempty"` - // noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. + // noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. // Empty means unset and will not result in an env var. // +optional NoProxy string `json:"noProxy,omitempty"` diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 9926aa1f5..75536108b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -307,6 +307,59 @@ func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudPlatformSpec) DeepCopyInto(out *AlibabaCloudPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformSpec. +func (in *AlibabaCloudPlatformSpec) DeepCopy() *AlibabaCloudPlatformSpec { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
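One more illustrative sketch before the regenerated deepcopy functions continue: the new OpenIDClaims.Groups field above is resolved by taking the first configured claim that carries a non-empty value. The claims map below stands in for a decoded ID token or userInfo response and is purely hypothetical, with claim values simplified to string slices.

package sketch

import configv1 "github.com/openshift/api/config/v1"

// groupsFromClaims walks the configured group claims in order and returns the
// value of the first claim that is present and non-empty, mirroring the field
// description: "the first one with a non-empty value is used".
func groupsFromClaims(cfg configv1.OpenIDClaims, claims map[string][]string) []string {
	for _, claim := range cfg.Groups {
		if groups := claims[string(claim)]; len(groups) > 0 {
			return groups
		}
	}
	return nil
}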
+func (in *AlibabaCloudPlatformStatus) DeepCopyInto(out *AlibabaCloudPlatformStatus) { + *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AlibabaCloudResourceTag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudPlatformStatus. +func (in *AlibabaCloudPlatformStatus) DeepCopy() *AlibabaCloudPlatformStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudResourceTag) DeepCopyInto(out *AlibabaCloudResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudResourceTag. +func (in *AlibabaCloudResourceTag) DeepCopy() *AlibabaCloudResourceTag { + if in == nil { + return nil + } + out := new(AlibabaCloudResourceTag) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Audit) DeepCopyInto(out *Audit) { *out = *in @@ -737,6 +790,27 @@ func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + if in.PromQL != nil { + in, out := &in.PromQL, &out.PromQL + *out = new(PromQLClusterCondition) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in @@ -993,6 +1067,13 @@ func (in *ClusterVersionStatus) DeepCopyInto(out *ClusterVersionStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ConditionalUpdates != nil { + in, out := &in.ConditionalUpdates, &out.ConditionalUpdates + *out = make([]ConditionalUpdate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1077,6 +1158,60 @@ func (in *ComponentRouteStatus) DeepCopy() *ComponentRouteStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdate) DeepCopyInto(out *ConditionalUpdate) { + *out = *in + in.Release.DeepCopyInto(&out.Release) + if in.Risks != nil { + in, out := &in.Risks, &out.Risks + *out = make([]ConditionalUpdateRisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdate. 
+func (in *ConditionalUpdate) DeepCopy() *ConditionalUpdate { + if in == nil { + return nil + } + out := new(ConditionalUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionalUpdateRisk) DeepCopyInto(out *ConditionalUpdateRisk) { + *out = *in + if in.MatchingRules != nil { + in, out := &in.MatchingRules, &out.MatchingRules + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionalUpdateRisk. +func (in *ConditionalUpdateRisk) DeepCopy() *ConditionalUpdateRisk { + if in == nil { + return nil + } + out := new(ConditionalUpdateRisk) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConfigMapFileReference) DeepCopyInto(out *ConfigMapFileReference) { *out = *in @@ -2047,6 +2182,89 @@ func (in *Image) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicy) DeepCopyInto(out *ImageContentPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicy. +func (in *ImageContentPolicy) DeepCopy() *ImageContentPolicy { + if in == nil { + return nil + } + out := new(ImageContentPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageContentPolicyList) DeepCopyInto(out *ImageContentPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageContentPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicyList. +func (in *ImageContentPolicyList) DeepCopy() *ImageContentPolicyList { + if in == nil { + return nil + } + out := new(ImageContentPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageContentPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageContentPolicySpec) DeepCopyInto(out *ImageContentPolicySpec) { + *out = *in + if in.RepositoryDigestMirrors != nil { + in, out := &in.RepositoryDigestMirrors, &out.RepositoryDigestMirrors + *out = make([]RepositoryDigestMirrors, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageContentPolicySpec. +func (in *ImageContentPolicySpec) DeepCopy() *ImageContentPolicySpec { + if in == nil { + return nil + } + out := new(ImageContentPolicySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { *out = *in @@ -2906,6 +3124,11 @@ func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]OpenIDClaim, len(*in)) + copy(*out, *in) + } return } @@ -3186,6 +3409,16 @@ func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = new(EquinixMetalPlatformSpec) **out = **in } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformSpec) + **out = **in + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformSpec) + **out = **in + } return } @@ -3252,6 +3485,16 @@ func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { *out = new(EquinixMetalPlatformStatus) **out = **in } + if in.PowerVS != nil { + in, out := &in.PowerVS, &out.PowerVS + *out = new(PowerVSPlatformStatus) + (*in).DeepCopyInto(*out) + } + if in.AlibabaCloud != nil { + in, out := &in.AlibabaCloud, &out.AlibabaCloud + *out = new(AlibabaCloudPlatformStatus) + (*in).DeepCopyInto(*out) + } return } @@ -3265,6 +3508,59 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformSpec. +func (in *PowerVSPlatformSpec) DeepCopy() *PowerVSPlatformSpec { + if in == nil { + return nil + } + out := new(PowerVSPlatformSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSPlatformStatus) DeepCopyInto(out *PowerVSPlatformStatus) { + *out = *in + if in.ServiceEndpoints != nil { + in, out := &in.ServiceEndpoints, &out.ServiceEndpoints + *out = make([]PowerVSServiceEndpoint, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSPlatformStatus. +func (in *PowerVSPlatformStatus) DeepCopy() *PowerVSPlatformStatus { + if in == nil { + return nil + } + out := new(PowerVSPlatformStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSServiceEndpoint) DeepCopyInto(out *PowerVSServiceEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSServiceEndpoint. 
+func (in *PowerVSServiceEndpoint) DeepCopy() *PowerVSServiceEndpoint { + if in == nil { + return nil + } + out := new(PowerVSServiceEndpoint) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Project) DeepCopyInto(out *Project) { *out = *in @@ -3359,6 +3655,22 @@ func (in *ProjectStatus) DeepCopy() *ProjectStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PromQLClusterCondition) DeepCopyInto(out *PromQLClusterCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PromQLClusterCondition. +func (in *PromQLClusterCondition) DeepCopy() *PromQLClusterCondition { + if in == nil { + return nil + } + out := new(PromQLClusterCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Proxy) DeepCopyInto(out *Proxy) { *out = *in @@ -3548,6 +3860,27 @@ func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryDigestMirrors) DeepCopyInto(out *RepositoryDigestMirrors) { + *out = *in + if in.Mirrors != nil { + in, out := &in.Mirrors, &out.Mirrors + *out = make([]Mirror, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryDigestMirrors. +func (in *RepositoryDigestMirrors) DeepCopy() *RepositoryDigestMirrors { + if in == nil { + return nil + } + out := new(RepositoryDigestMirrors) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 8287ce6e1..00ad83eb9 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -323,7 +323,7 @@ func (APIServerSpec) SwaggerDoc() map[string]string { } var map_Audit = map[string]string{ - "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). 
- None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: to raise a Red Hat support request, it is required to set this to Default, WriteRequestBodies, or AllRequestBodies to generate audit log events that can be analyzed by support.\n\nIf unset, the 'Default' profile is used as the default.", + "profile": "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules.\n\nThe following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events\n (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody\n level).\n- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens.\n\nWarning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.\n\nIf unset, the 'Default' profile is used as the default.", "customRules": "customRules specify profiles per group. These profile take precedence over the top-level profile field if they apply. They are evaluation from top to bottom and the first one that matches, applies.", } @@ -528,6 +528,16 @@ func (OperandVersion) SwaggerDoc() map[string]string { return map_OperandVersion } +var map_ClusterCondition = map[string]string{ + "": "ClusterCondition is a union of typed cluster conditions. The 'type' property determines which of the type-specific properties are relevant. When evaluated on a cluster, the condition may match, not match, or fail to evaluate.", + "type": "type represents the cluster-condition type. This defines the members and semantics of any additional properties.", + "promql": "promQL represents a cluster condition based on PromQL.", +} + +func (ClusterCondition) SwaggerDoc() map[string]string { + return map_ClusterCondition +} + var map_ClusterVersion = map[string]string{ "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", @@ -566,7 +576,8 @@ var map_ClusterVersionStatus = map[string]string{ "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent a previous version.", "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. 
It is used by the operator to avoid unnecessary work and is for internal use only.", "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", - "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", + "availableUpdates": "availableUpdates contains updates recommended for this cluster. Updates which appear in conditionalUpdates but not in availableUpdates may expose this cluster to known issues. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", + "conditionalUpdates": "conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified.", } func (ClusterVersionStatus) SwaggerDoc() map[string]string { @@ -586,6 +597,38 @@ func (ComponentOverride) SwaggerDoc() map[string]string { return map_ComponentOverride } +var map_ConditionalUpdate = map[string]string{ + "": "ConditionalUpdate represents an update which is recommended to some clusters on the version the current cluster is reconciling, but which may not be recommended for the current cluster.", + "release": "release is the target of the update.", + "risks": "risks represents the range of issues associated with updating to the target release. The cluster-version operator will evaluate all entries, and only recommend the update if there is at least one entry and all entries recommend the update.", + "conditions": "conditions represents the observations of the conditional update's current status. Known types are: * Evaluating, for whether the cluster-version operator will attempt to evaluate any risks[].matchingRules. * Recommended, for whether the update is recommended for the current cluster.", +} + +func (ConditionalUpdate) SwaggerDoc() map[string]string { + return map_ConditionalUpdate +} + +var map_ConditionalUpdateRisk = map[string]string{ + "": "ConditionalUpdateRisk represents a reason and cluster-state for not recommending a conditional update.", + "url": "url contains information about this risk.", + "name": "name is the CamelCase reason for not recommending a conditional update, in the event that matchingRules match the cluster state.", + "message": "message provides additional information about the risk of updating, in the event that matchingRules match the cluster state. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", + "matchingRules": "matchingRules is a slice of conditions for deciding which clusters match the risk and which do not. The slice is ordered by decreasing precedence. 
The cluster-version operator will walk the slice in order, and stop after the first one it can successfully evaluate. If no condition can be successfully evaluated, the update will not be recommended.", +} + +func (ConditionalUpdateRisk) SwaggerDoc() map[string]string { + return map_ConditionalUpdateRisk +} + +var map_PromQLClusterCondition = map[string]string{ + "": "PromQLClusterCondition represents a cluster condition based on PromQL.", + "promql": "PromQL is a PromQL query classifying clusters. This query should return a 1 in the match case and a 0 in the does-not-match case. Queries which return no time series, or which return values besides 0 or 1, are evaluation failures.", +} + +func (PromQLClusterCondition) SwaggerDoc() map[string]string { + return map_PromQLClusterCondition +} + var map_Release = map[string]string{ "": "Release represents an OpenShift release image and associated metadata.", "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", @@ -617,6 +660,7 @@ var map_UpdateHistory = map[string]string{ "version": "version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.", "image": "image is a container image location that contains the update. This value is always populated.", "verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.", + "acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.", } func (UpdateHistory) SwaggerDoc() map[string]string { @@ -801,6 +845,43 @@ func (RegistrySources) SwaggerDoc() map[string]string { return map_RegistrySources } +var map_ImageContentPolicy = map[string]string{ + "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "spec holds user settable values for configuration", +} + +func (ImageContentPolicy) SwaggerDoc() map[string]string { + return map_ImageContentPolicy +} + +var map_ImageContentPolicyList = map[string]string{ + "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (ImageContentPolicyList) SwaggerDoc() map[string]string { + return map_ImageContentPolicyList +} + +var map_ImageContentPolicySpec = map[string]string{ + "": "ImageContentPolicySpec is the specification of the ImageContentPolicy CRD.", + "repositoryDigestMirrors": "repositoryDigestMirrors allows images referenced by image digests in pods to be pulled from alternative mirrored repository locations.
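As a minimal sketch of how the conditional-update and PromQL cluster-condition types documented above compose: the risk below is hypothetical, its URL, name, message and query are placeholders, and the inner query field is assumed to be named PromQL as the generated docs suggest.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// A placeholder risk attached to a conditional update. The update would only be
	// recommended for clusters where the PromQL expression evaluates to 0 (no match).
	risk := configv1.ConditionalUpdateRisk{
		URL:     "https://example.com/kb/placeholder",
		Name:    "ExamplePlaceholderRisk",
		Message: "Illustrative description of why the update may not be recommended.",
		MatchingRules: []configv1.ClusterCondition{
			{
				Type: "PromQL",
				PromQL: &configv1.PromQLClusterCondition{
					// Placeholder query; a real rule should return 1 (match) or 0 (no match).
					PromQL: `cluster_infrastructure_provider{type="Example"}`,
				},
			},
		},
	}
	fmt.Println(risk.Name, len(risk.MatchingRules))
}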
The image pull specification provided to the pod will be compared to the source locations described in RepositoryDigestMirrors and the image may be pulled down from any of the mirrors in the list instead of the specified repository allowing administrators to choose a potentially faster mirror. To pull image from mirrors by tags, should set the \"allowMirrorByTags\".\n\nEach “source” repository is treated independently; configurations for different “source” repositories don’t interact.\n\nIf the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec.\n\nWhen multiple policies are defined for the same “source” repository, the sets of defined mirrors will be merged together, preserving the relative order of the mirrors, if possible. For example, if policy A has mirrors `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be used in the order `a, b, c, d, e`. If the orders of mirror entries conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected but the resulting order is unspecified.", +} + +func (ImageContentPolicySpec) SwaggerDoc() map[string]string { + return map_ImageContentPolicySpec +} + +var map_RepositoryDigestMirrors = map[string]string{ + "": "RepositoryDigestMirrors holds cluster-wide information about how to handle mirrors in the registries config.", + "source": "source is the repository that users refer to, e.g. in image pull specifications.", + "allowMirrorByTags": "allowMirrorByTags if true, the mirrors can be used to pull the images that are referenced by their tags. Default is false, the mirrors only work when pulling the images that are referenced by their digests. Pulling images by tag can potentially yield different images, depending on which endpoint we pull from. Forcing digest-pulls for mirrors avoids that issue.", + "mirrors": "mirrors is zero or more repositories that may also contain the same images. If the \"mirrors\" is not specified, the image will continue to be pulled from the specified repository in the pull spec. No mirror will be configured. The order of mirrors in this list is treated as the user's desired priority, while source is by default considered lower priority than all mirrors. Other cluster configuration, including (but not limited to) other repositoryDigestMirrors objects, may impact the exact order mirrors are contacted in, or some mirrors may be contacted in parallel, so this should be considered a preference rather than a guarantee of ordering.", +} + +func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { + return map_RepositoryDigestMirrors +} + var map_AWSPlatformSpec = map[string]string{ "": "AWSPlatformSpec holds the desired state of the Amazon Web Services infrastructure provider. This only includes fields that can be modified in the cluster.", "serviceEndpoints": "serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service.", @@ -841,6 +922,35 @@ func (AWSServiceEndpoint) SwaggerDoc() map[string]string { return map_AWSServiceEndpoint } +var map_AlibabaCloudPlatformSpec = map[string]string{ + "": "AlibabaCloudPlatformSpec holds the desired state of the Alibaba Cloud infrastructure provider. 
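A minimal sketch of an ImageContentPolicy built from the types documented above; the registry hostnames are placeholders, and it assumes the vendored package's Mirror type is string-backed, as the generated deepcopy implies.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Digest-referenced pulls for the source repository may be served by either
	// mirror, in the listed order, before falling back to the source itself.
	icp := configv1.ImageContentPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-mirrors"},
		Spec: configv1.ImageContentPolicySpec{
			RepositoryDigestMirrors: []configv1.RepositoryDigestMirrors{
				{
					Source:            "registry.example.com/team/app",
					AllowMirrorByTags: false,
					Mirrors: []configv1.Mirror{
						"mirror-a.example.com/team/app",
						"mirror-b.example.com/team/app",
					},
				},
			},
		},
	}
	fmt.Println(icp.Name, len(icp.Spec.RepositoryDigestMirrors[0].Mirrors))
}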
This only includes fields that can be modified in the cluster.", +} + +func (AlibabaCloudPlatformSpec) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformSpec +} + +var map_AlibabaCloudPlatformStatus = map[string]string{ + "": "AlibabaCloudPlatformStatus holds the current status of the Alibaba Cloud infrastructure provider.", + "region": "region specifies the region for Alibaba Cloud resources created for the cluster.", + "resourceGroupID": "resourceGroupID is the ID of the resource group for the cluster.", + "resourceTags": "resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster.", +} + +func (AlibabaCloudPlatformStatus) SwaggerDoc() map[string]string { + return map_AlibabaCloudPlatformStatus +} + +var map_AlibabaCloudResourceTag = map[string]string{ + "": "AlibabaCloudResourceTag is the set of tags to add to apply to resources.", + "key": "key is the key of the tag.", + "value": "value is the value of the tag.", +} + +func (AlibabaCloudResourceTag) SwaggerDoc() map[string]string { + return map_AlibabaCloudResourceTag +} + var map_AzurePlatformSpec = map[string]string{ "": "AzurePlatformSpec holds the desired state of the Azure infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1039,7 +1149,7 @@ func (OvirtPlatformStatus) SwaggerDoc() map[string]string { var map_PlatformSpec = map[string]string{ "": "PlatformSpec holds the desired state specific to the underlying infrastructure provider of the current cluster. Since these are used at spec-level for the underlying cluster, it is supposed that only one of the spec structs is set.", - "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"KubeVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\" and \"None\". 
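A minimal sketch of the new AlibabaCloud platform status wired into PlatformStatus; the values are placeholders chosen to satisfy the validation markers above, and the PlatformType conversion assumes the usual string-backed discriminator.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	status := configv1.PlatformStatus{
		Type: configv1.PlatformType("AlibabaCloud"),
		AlibabaCloud: &configv1.AlibabaCloudPlatformStatus{
			// region must match ^[0-9A-Za-z-]+$ and resourceGroupID must match ^rg-[0-9A-Za-z]+$.
			Region:          "us-east-1",
			ResourceGroupID: "rg-examplegroupid",
			// At most 20 tags; key and value are each limited to 128 characters.
			ResourceTags: []configv1.AlibabaCloudResourceTag{
				{Key: "team", Value: "example"},
			},
		},
	}
	fmt.Println(status.Type, status.AlibabaCloud.Region)
}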
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", "azure": "Azure contains settings specific to the Azure infrastructure provider.", "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", @@ -1050,6 +1160,8 @@ var map_PlatformSpec = map[string]string{ "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", } func (PlatformSpec) SwaggerDoc() map[string]string { @@ -1058,7 +1170,7 @@ func (PlatformSpec) SwaggerDoc() map[string]string { var map_PlatformStatus = map[string]string{ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", - "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set.", + "type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.\n\nThis value will be synced with to the `status.platform` and `status.platformStatus.type`. 
Currently this value cannot be changed once set.", "aws": "AWS contains settings specific to the Amazon Web Services infrastructure provider.", "azure": "Azure contains settings specific to the Azure infrastructure provider.", "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", @@ -1069,12 +1181,44 @@ var map_PlatformStatus = map[string]string{ "ibmcloud": "IBMCloud contains settings specific to the IBMCloud infrastructure provider.", "kubevirt": "Kubevirt contains settings specific to the kubevirt infrastructure provider.", "equinixMetal": "EquinixMetal contains settings specific to the Equinix Metal infrastructure provider.", + "powervs": "PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider.", + "alibabaCloud": "AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider.", } func (PlatformStatus) SwaggerDoc() map[string]string { return map_PlatformStatus } +var map_PowerVSPlatformSpec = map[string]string{ + "": "PowerVSPlatformSpec holds the desired state of the IBM Power Systems Virtual Servers infrastructure provider. This only includes fields that can be modified in the cluster.", +} + +func (PowerVSPlatformSpec) SwaggerDoc() map[string]string { + return map_PowerVSPlatformSpec +} + +var map_PowerVSPlatformStatus = map[string]string{ + "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.", + "region": "region holds the default Power VS region for new Power VS resources created by the cluster.", + "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported.", + "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", + "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", +} + +func (PowerVSPlatformStatus) SwaggerDoc() map[string]string { + return map_PowerVSPlatformStatus +} + +var map_PowerVSServiceEndpoint = map[string]string{ + "": "PowerVSServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services.", + "name": "name is the name of the Power VS service.", + "url": "url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty.", +} + +func (PowerVSServiceEndpoint) SwaggerDoc() map[string]string { + return map_PowerVSServiceEndpoint +} + var map_VSpherePlatformSpec = map[string]string{ "": "VSpherePlatformSpec holds the desired state of the vSphere infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1423,6 +1567,7 @@ var map_OpenIDClaims = map[string]string{ "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the sub claim", "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", "email": "email is the list of claims whose values should be used as the email address. Optional.
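A minimal sketch of the PowerVS platform status documented above; region, zone, endpoint name and CRN are placeholders, and the Go field names for region/zone and the endpoint's Name/URL are assumed to follow their JSON keys.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	pvs := configv1.PowerVSPlatformStatus{
		Region: "dal",
		Zone:   "dal12",
		// Each entry overrides the default endpoint of the named Power VS service
		// and must be an https URI.
		ServiceEndpoints: []configv1.PowerVSServiceEndpoint{
			{Name: "iam", URL: "https://iam.example.test"},
		},
		// Placeholder CRN for the Cloud Internet Services instance.
		CISInstanceCRN: "crn:v1:example:public:internet-svcs:global:a/0123456789:instance::",
	}
	fmt.Println(pvs.Region, len(pvs.ServiceEndpoints))
}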
If unspecified, no email is set for the identity", + "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user. If multiple claims are specified, the first one with a non-empty value is used.", } func (OpenIDClaims) SwaggerDoc() map[string]string { @@ -1464,7 +1609,7 @@ var map_TokenConfig = map[string]string{ "": "TokenConfig holds the necessary configuration options for authorization and access tokens", "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", - "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", } func (TokenConfig) SwaggerDoc() map[string]string { @@ -1585,7 +1730,7 @@ var map_ProxySpec = map[string]string{ "": "ProxySpec contains cluster proxy creation configuration.", "httpProxy": "httpProxy is the URL of the proxy for HTTP requests. Empty means unset and will not result in an env var.", "httpsProxy": "httpsProxy is the URL of the proxy for HTTPS requests. Empty means unset and will not result in an env var.", - "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs for which the proxy should not be used. Empty means unset and will not result in an env var.", + "noProxy": "noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used. Empty means unset and will not result in an env var.", "readinessEndpoints": "readinessEndpoints is a list of endpoints used to verify readiness of the proxy.", "trustedCA": "trustedCA is a reference to a ConfigMap containing a CA certificate bundle. The trustedCA field should only be consumed by a proxy validator. 
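A minimal sketch of the new groups claim mapping on OpenIDClaims documented earlier in this hunk; the claim names are placeholders, and the first listed claim carrying a non-empty value is the one used.

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	claims := configv1.OpenIDClaims{
		PreferredUsername: []string{"preferred_username"},
		Email:             []string{"email"},
		// Groups from the OIDC provider are synchronized from the first of these
		// claims that carries a non-empty value.
		Groups: []configv1.OpenIDClaim{"groups", "roles"},
	}
	fmt.Println(claims.Groups)
}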
The validator is responsible for reading the certificate bundle from the required key \"ca-bundle.crt\", merging it with the system default trust bundle, and writing the merged trust bundle to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" namespace. Clients that expect to make proxy connections must use the trusted-ca-bundle for all HTTPS requests to the proxy, and may use the trusted-ca-bundle for non-proxy HTTPS requests as well.\n\nThe namespace for the ConfigMap referenced by trustedCA is \"openshift-config\". Here is an example ConfigMap (in yaml):\n\napiVersion: v1 kind: ConfigMap metadata:\n name: user-ca-bundle\n namespace: openshift-config\n data:\n ca-bundle.crt: |", } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml new file mode 100644 index 000000000..0e4fb4c5c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml @@ -0,0 +1,274 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/948 + name: machines.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: Machine + listKind: MachineList + plural: machines + singular: machine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Phase of machine + jsonPath: .status.phase + name: Phase + type: string + - description: Type of instance + jsonPath: .metadata.labels['machine\.openshift\.io/instance-type'] + name: Type + type: string + - description: Region associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/region'] + name: Region + type: string + - description: Zone associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/zone'] + name: Zone + type: string + - description: Machine age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Node associated with machine + jsonPath: .status.nodeRef.name + name: Node + priority: 1 + type: string + - description: Provider ID of machine created in cloud provider + jsonPath: .spec.providerID + name: ProviderID + priority: 1 + type: string + - description: State of instance + jsonPath: .metadata.annotations['machine\.openshift\.io/instance-state'] + name: State + priority: 1 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: 'Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSpec defines the desired state of Machine + type: object + properties: + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. 
+ type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. 
+ type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + status: + description: MachineStatus defines the observed state of Machine + type: object + properties: + addresses: + description: Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + type: array + items: + description: NodeAddress contains information for the node's address. + type: object + required: + - address + - type + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP or InternalIP. + type: string + conditions: + description: Conditions defines the current state of the Machine + type: array + items: + description: Condition defines an observation of a Machine API resource operational state. + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + errorMessage: + description: "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + errorReason: + description: "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. 
\n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + lastOperation: + description: LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully. + type: object + properties: + description: + description: Description is the human-readable description of the last operation. + type: string + lastUpdated: + description: LastUpdated is the timestamp at which LastOperation API was last-updated. + type: string + format: date-time + state: + description: State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc + type: string + type: + description: Type is the type of operation which was last performed. E.g. Create, Delete, Update etc + type: string + lastUpdated: + description: LastUpdated identifies when this status was last observed. + type: string + format: date-time + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + phase: + description: 'Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting' + type: string + providerStatus: + description: ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field. + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml new file mode 100644 index 000000000..ae73bf3d1 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml @@ -0,0 +1,192 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + creationTimestamp: null + name: machinehealthchecks.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineHealthCheck + listKind: MachineHealthCheckList + plural: machinehealthchecks + shortNames: + - mhc + - mhcs + singular: machinehealthcheck + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Maximum number of unhealthy machines allowed + jsonPath: .spec.maxUnhealthy + name: MaxUnhealthy + type: string + - description: Number of machines currently monitored + jsonPath: .status.expectedMachines + name: ExpectedMachines + type: integer + - description: Current observed healthy machines + jsonPath: .status.currentHealthy + name: CurrentHealthy + type: integer + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of machine health check policy + type: object + properties: + maxUnhealthy: + description: Any farther remediation is only allowed if at most "MaxUnhealthy" machines selected by "selector" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. 
+ default: 100% + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to "0". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + type: string + default: 10m + pattern: ^0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + remediationTemplate: + description: "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider. \n This field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator." + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + selector: + description: 'Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.' + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + unhealthyConditions: + description: UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy. + type: array + minItems: 1 + items: + description: UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy. + type: object + properties: + status: + type: string + minLength: 1 + timeout: + description: Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + type: string + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: + type: string + minLength: 1 + status: + description: Most recently observed status of MachineHealthCheck resource + type: object + properties: + conditions: + description: Conditions defines the current state of the MachineHealthCheck + type: array + items: + description: Condition defines an observation of a Machine API resource operational state. + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
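The selector block above is the standard metav1.LabelSelector shape (matchLabels ANDed with matchExpressions, with an empty selector matching all machines), so callers usually convert it before matching machine labels. A small sketch assuming the usual apimachinery helpers; the label keys and values are made up for illustration.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A selector equivalent to the matchLabels/matchExpressions schema above.
	ls := &metav1.LabelSelector{
		MatchLabels: map[string]string{"machine.openshift.io/cluster-api-machine-role": "worker"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "machine.openshift.io/os-id", Operator: metav1.LabelSelectorOpIn, Values: []string{"Windows"}},
		},
	}

	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}

	machineLabels := labels.Set{
		"machine.openshift.io/cluster-api-machine-role": "worker",
		"machine.openshift.io/os-id":                    "Windows",
	}
	fmt.Println("machine selected:", selector.Matches(machineLabels))
}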
+ type: string + currentHealthy: + description: total number of machines counted by this machine health check + type: integer + minimum: 0 + expectedMachines: + description: total number of machines counted by this machine health check + type: integer + minimum: 0 + remediationsAllowed: + description: RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied + type: integer + format: int32 + minimum: 0 + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml new file mode 100644 index 000000000..8bc57660d --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml @@ -0,0 +1,295 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + creationTimestamp: null + name: machinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineSet + listKind: MachineSetList + plural: machinesets + singular: machineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Observed number of available replicas + jsonPath: .status.availableReplicas + name: Available + type: string + - description: Machineset age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineSet ensures that a specified number of machines replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSetSpec defines the desired state of MachineSet + type: object + properties: + deletePolicy: + description: DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to "Random". 
Valid values are "Random, "Newest", "Oldest" + type: string + enum: + - Random + - Newest + - Oldest + minReadySeconds: + description: MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready) + type: integer + format: int32 + replicas: + description: Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. + type: integer + format: int32 + default: 1 + selector: + description: 'Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + template: + description: Template is the object that describes the machine that will be created if insufficient replicas are detected. + type: object + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. 
\n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + spec: + description: 'Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + type: object + properties: + metadata: + description: ObjectMeta will autopopulate the Node created. 
Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. 
To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + uid: + description: 'UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + status: + description: MachineSetStatus defines the observed state of MachineSet + type: object + properties: + availableReplicas: + description: The number of available replicas (ready for at least minReadySeconds) for this MachineSet. 
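The taints list above is additive and reuses the core/v1 Taint shape, so a repel taint for the resulting Node can be declared directly in the machine template. A tiny sketch; the os=Windows key and value are only an example of the pattern, not something this schema mandates.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A taint as it would appear under spec.template.spec.taints in a
	// MachineSet; effect must be NoSchedule, PreferNoSchedule or NoExecute.
	taint := corev1.Taint{
		Key:    "os",
		Value:  "Windows",
		Effect: corev1.TaintEffectNoSchedule,
	}
	fmt.Printf("%s=%s:%s\n", taint.Key, taint.Value, taint.Effect)
}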
+ type: integer + format: int32 + errorMessage: + type: string + errorReason: + description: "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption. \n These fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output." + type: string + fullyLabeledReplicas: + description: The number of replicas that have labels matching the labels of the machine template of the MachineSet. + type: integer + format: int32 + observedGeneration: + description: ObservedGeneration reflects the generation of the most recently observed MachineSet. + type: integer + format: int64 + readyReplicas: + description: The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + type: integer + format: int32 + replicas: + description: Replicas is the most recently observed number of replicas. + type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1beta1/doc.go b/vendor/github.com/openshift/api/machine/v1beta1/doc.go new file mode 100644 index 000000000..e14fc64e3 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=machine.openshift.io +package v1beta1 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/register.go b/vendor/github.com/openshift/api/machine/v1beta1/register.go new file mode 100644 index 000000000..a3678c007 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/register.go @@ -0,0 +1,44 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "machine.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to 
the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Machine{}, + &MachineList{}, + &MachineSet{}, + &MachineSetList{}, + &MachineHealthCheck{}, + &MachineHealthCheckList{}, + ) + + return nil +} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/awsproviderconfig_types.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go similarity index 79% rename from vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/awsproviderconfig_types.go rename to vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index 6e377f611..1df85939d 100644 --- a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/awsproviderconfig_types.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -1,19 +1,3 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package v1beta1 import ( @@ -21,89 +5,80 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API -// +k8s:openapi-gen=true +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AWSMachineProviderConfig struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // AMI is the reference to the AMI from which to create the machine instance. AMI AWSResourceReference `json:"ami"` - // InstanceType is the type of instance to create. Example: m4.xlarge InstanceType string `json:"instanceType"` - // Tags is the set of tags to add to apply to an instance, in addition to the ones // added by default by the actuator. These tags are additive. The actuator will ensure // these tags are present, but will not remove any other tags that may exist on the // instance. 
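register.go above exposes Install (with AddToScheme kept as a deprecated alias) as the way to make these types known to a runtime.Scheme. A minimal usage sketch; error handling is abbreviated.

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()

	// Install registers Machine, MachineSet and MachineHealthCheck (and
	// their list types) under machine.openshift.io/v1beta1, as addKnownTypes
	// above does.
	if err := machinev1beta1.Install(scheme); err != nil {
		panic(err)
	}

	gvks, _, err := scheme.ObjectKinds(&machinev1beta1.Machine{})
	if err != nil {
		panic(err)
	}
	fmt.Println("registered as:", gvks[0].String())
}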
+ // +optional Tags []TagSpecification `json:"tags,omitempty"` - // IAMInstanceProfile is a reference to an IAM role to assign to the instance + // +optional IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance + // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions // provided by attached IAM role where the actuator is running. + // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // KeyName is the name of the KeyPair to use for SSH + // +optional KeyName *string `json:"keyName,omitempty"` - // DeviceIndex is the index of the device on the instance for the network interface attachment. // Defaults to 0. DeviceIndex int64 `json:"deviceIndex"` - // PublicIP specifies whether the instance should get a public IP. If not present, // it should use the default of its subnet. + // +optional PublicIP *bool `json:"publicIp,omitempty"` - // SecurityGroups is an array of references to security groups that should be applied to the // instance. + // +optional SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` - // Subnet is a reference to the subnet to use for this instance Subnet AWSResourceReference `json:"subnet"` - // Placement specifies where to create the instance in AWS Placement Placement `json:"placement"` - // LoadBalancers is the set of load balancers to which the new instance // should be added once it is created. + // +optional LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"` - // BlockDevices is the set of block device mapping associated to this instance, // block device without a name will be used as a root device and only one device without a name is allowed // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html + // +optional BlockDevices []BlockDeviceMappingSpec `json:"blockDevices,omitempty"` - // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. + // +optional SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` } // BlockDeviceMappingSpec describes a block device mapping type BlockDeviceMappingSpec struct { - // The device name exposed to the machine (for example, /dev/sdh or xvdh). + // +optional DeviceName *string `json:"deviceName,omitempty"` - // Parameters used to automatically set up EBS volumes when the machine is // launched. + // +optional EBS *EBSBlockDeviceSpec `json:"ebs,omitempty"` - // Suppresses the specified device included in the block device mapping of the // AMI. + // +optional NoDevice *string `json:"noDevice,omitempty"` - // The virtual device name (ephemeralN). Machine store volumes are numbered // starting from 0. An machine type with 2 available machine store volumes // can specify mappings for ephemeral0 and ephemeral1.The number of available @@ -114,23 +89,23 @@ type BlockDeviceMappingSpec struct { // the block device mapping for the machine. When you launch an M3 machine, // we ignore any machine store volumes specified in the block device mapping // for the AMI. + // +optional VirtualName *string `json:"virtualName,omitempty"` } // EBSBlockDeviceSpec describes a block device for an EBS volume. 
// https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice type EBSBlockDeviceSpec struct { - // Indicates whether the EBS volume is deleted on machine termination. + // +optional DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` - // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes // may only be attached to machines that support Amazon EBS encryption. + // +optional Encrypted *bool `json:"encrypted,omitempty"` - // Indicates the KMS key that should be used to encrypt the Amazon EBS volume. + // +optional KMSKey AWSResourceReference `json:"kmsKey,omitempty"` - // The number of I/O operations per second (IOPS) that the volume supports. // For io1, this represents the number of IOPS that are provisioned for the // volume. For gp2, this represents the baseline performance of the volume and @@ -145,8 +120,8 @@ type EBSBlockDeviceSpec struct { // // Condition: This parameter is required for requests to create io1 volumes; // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // +optional Iops *int64 `json:"iops,omitempty"` - // The size of the volume, in GiB. // // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned @@ -157,10 +132,11 @@ type EBSBlockDeviceSpec struct { // // Default: If you're creating the volume from a snapshot and don't specify // a volume size, the default is the snapshot size. + // +optional VolumeSize *int64 `json:"volumeSize,omitempty"` - // The volume type: gp2, io1, st1, sc1, or standard. // Default: standard + // +optional VolumeType *string `json:"volumeType,omitempty"` } @@ -170,6 +146,7 @@ type EBSBlockDeviceSpec struct { type SpotMarketOptions struct { // The maximum price the user is willing to pay for their instances // Default: On-Demand price + // +optional MaxPrice *string `json:"maxPrice,omitempty"` } @@ -180,25 +157,25 @@ type AWSResourceReference struct { // ID of resource // +optional ID *string `json:"id,omitempty"` - // ARN of resource // +optional ARN *string `json:"arn,omitempty"` - // Filters is a set of filters used to identify a resource + // +optional Filters []Filter `json:"filters,omitempty"` } // Placement indicates where to create the instance in AWS type Placement struct { // Region is the region to use to create the instance + // +optional Region string `json:"region,omitempty"` - // AvailabilityZone is the availability zone of the instance + // +optional AvailabilityZone string `json:"availabilityZone,omitempty"` - // Tenancy indicates if instance should run on shared or single-tenant hardware. There are // supported 3 options: default, dedicated and host. + // +optional Tenancy InstanceTenancy `json:"tenancy,omitempty"` } @@ -206,8 +183,8 @@ type Placement struct { type Filter struct { // Name of the filter. Filter names are case-sensitive. Name string `json:"name"` - // Values includes one or more filter values. Filter values are case-sensitive. + // +optional Values []string `json:"values,omitempty"` } @@ -215,16 +192,16 @@ type Filter struct { type TagSpecification struct { // Name of the tag Name string `json:"name"` - // Value of the tag Value string `json:"value"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=2 type AWSMachineProviderConfigList struct { metav1.TypeMeta `json:",inline"` + // +optional metav1.ListMeta `json:"metadata,omitempty"` Items []AWSMachineProviderConfig `json:"items"` } @@ -258,6 +235,40 @@ const ( NetworkLoadBalancerType AWSLoadBalancerType = "network" // AWS Network Load Balancer (NLB) ) -func init() { - SchemeBuilder.Register(&AWSMachineProviderConfig{}, &AWSMachineProviderConfigList{}, &AWSMachineProviderStatus{}) +// AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains AWS-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AWSMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // InstanceID is the instance ID of the machine created in AWS + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the state of the AWS instance for this machine + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []AWSMachineProviderCondition `json:"conditions,omitempty"` +} + +// AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus. +type AWSMachineProviderCondition struct { + // Type is the type of the condition. + Type ConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go new file mode 100644 index 000000000..e185a0193 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -0,0 +1,232 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. +// Required parameters such as location that are not specified by this configuration, will be defaulted +// by the actuator. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). 
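Because providerSpec.value is an opaque, preserve-unknown-fields extension in the CRDs, consumers typically round-trip it through JSON into the typed AWSMachineProviderConfig defined above. A sketch of that pattern; the helper name is made up and the Machine is assumed to come from a client elsewhere.

package example

import (
	"encoding/json"
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

// decodeAWSProviderSpec shows one way to read the typed AWS provider config
// out of a Machine's raw providerSpec.value. Illustrative only.
func decodeAWSProviderSpec(machine *machinev1beta1.Machine) (*machinev1beta1.AWSMachineProviderConfig, error) {
	if machine.Spec.ProviderSpec.Value == nil {
		return nil, fmt.Errorf("machine %s has no providerSpec value", machine.Name)
	}
	config := &machinev1beta1.AWSMachineProviderConfig{}
	if err := json.Unmarshal(machine.Spec.ProviderSpec.Value.Raw, config); err != nil {
		return nil, fmt.Errorf("unable to decode AWS provider spec: %w", err)
	}
	return config, nil
}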
+// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AzureMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with Azure credentials. + // +optional + CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"` + // Location is the region to use to create the instance + // +optional + Location string `json:"location,omitempty"` + // VMSize is the size of the VM to create. + // +optional + VMSize string `json:"vmSize,omitempty"` + // Image is the OS image to use to create the instance. + Image Image `json:"image"` + // OSDisk represents the parameters for creating the OS disk. + OSDisk OSDisk `json:"osDisk"` + // SSHPublicKey is the public key to use to SSH to the virtual machine. + // +optional + SSHPublicKey string `json:"sshPublicKey,omitempty"` + // PublicIP if true a public IP will be used + PublicIP bool `json:"publicIP"` + // Tags is a list of tags to apply to the machine. + // +optional + Tags map[string]string `json:"tags,omitempty"` + // Network Security Group that needs to be attached to the machine's interface. + // No security group will be attached if empty. + // +optional + SecurityGroup string `json:"securityGroup,omitempty"` + // Application Security Groups that need to be attached to the machine's interface. + // No application security groups will be attached if zero-length. + // +optional + ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"` + // Subnet to use for this instance + Subnet string `json:"subnet"` + // PublicLoadBalancer to use for this instance + // +optional + PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"` + // InternalLoadBalancerName to use for this instance + // +optional + InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"` + // NatRule to set inbound NAT rule of the load balancer + // +optional + NatRule *int64 `json:"natRule,omitempty"` + // ManagedIdentity to set managed identity name + // +optional + ManagedIdentity string `json:"managedIdentity,omitempty"` + // Vnet to set virtual network name + // +optional + Vnet string `json:"vnet,omitempty"` + // Availability Zone for the virtual machine. + // If nil, the virtual machine should be deployed to no zone + // +optional + Zone *string `json:"zone,omitempty"` + // NetworkResourceGroup is the resource group for the virtual machine's network + // +optional + NetworkResourceGroup string `json:"networkResourceGroup,omitempty"` + // ResourceGroup is the resource group for the virtual machine + // +optional + ResourceGroup string `json:"resourceGroup,omitempty"` + // SpotVMOptions allows the ability to specify the Machine should use a Spot VM + // +optional + SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"` + // SecurityProfile specifies the Security profile settings for a virtual machine. + // +optional + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + // AcceleratedNetworking enables or disables Azure accelerated networking feature. + // Set to false by default. If true, then this will depend on whether the requested + // VMSize is supported. 
If set to true with an unsupported VMSize, Azure will return an error. + // +optional + AcceleratedNetworking bool `json:"acceleratedNetworking,omitempty"` + // AvailabilitySet specifies the availability set to use for this instance. + // Availability set should be precreated, before using this field. + // +optional + AvailabilitySet string `json:"availabilitySet,omitempty"` +} + +// SpotVMOptions defines the options relevant to running the Machine on Spot VMs +type SpotVMOptions struct { + // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + // +optional + MaxPrice *resource.Quantity `json:"maxPrice,omitempty"` +} + +// AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains Azure-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AzureMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // VMID is the ID of the virtual machine created in Azure. + // +optional + VMID *string `json:"vmId,omitempty"` + // VMState is the provisioning state of the Azure virtual machine. + // +optional + VMState *AzureVMState `json:"vmState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status. + // +optional + Conditions []AzureMachineProviderCondition `json:"conditions,omitempty"` +} + +// VMState describes the state of an Azure virtual machine. +type AzureVMState string + +const ( + // ProvisioningState related values + // VMStateCreating ... + VMStateCreating = AzureVMState("Creating") + // VMStateDeleting ... + VMStateDeleting = AzureVMState("Deleting") + // VMStateFailed ... + VMStateFailed = AzureVMState("Failed") + // VMStateMigrating ... + VMStateMigrating = AzureVMState("Migrating") + // VMStateSucceeded ... + VMStateSucceeded = AzureVMState("Succeeded") + // VMStateUpdating ... + VMStateUpdating = AzureVMState("Updating") + + // PowerState related values + // VMStateStarting ... + VMStateStarting = AzureVMState("Starting") + // VMStateRunning ... + VMStateRunning = AzureVMState("Running") + // VMStateStopping ... + VMStateStopping = AzureVMState("Stopping") + // VMStateStopped ... + VMStateStopped = AzureVMState("Stopped") + // VMStateDeallocating ... + VMStateDeallocating = AzureVMState("Deallocating") + // VMStateDeallocated ... + VMStateDeallocated = AzureVMState("Deallocated") + // VMStateUnknown ... + VMStateUnknown = AzureVMState("Unknown") +) + +// Image is a mirror of azure sdk compute.ImageReference +type Image struct { + // Publisher is the name of the organization that created the image + Publisher string `json:"publisher"` + // Offer specifies the name of a group of related images created by the publisher. + // For example, UbuntuServer, WindowsServer + Offer string `json:"offer"` + // SKU specifies an instance of an offer, such as a major release of a distribution. + // For example, 18.04-LTS, 2019-Datacenter + SKU string `json:"sku"` + // Version specifies the version of an image sku. The allowed formats + // are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. + // Specify 'latest' to use the latest version of an image available at deploy time. 
+ // Even if you use 'latest', the VM image will not automatically update after deploy + // time even if a new version becomes available. + Version string `json:"version"` + // ResourceID specifies an image to use by ID + ResourceID string `json:"resourceID"` +} + +type OSDisk struct { + // OSType is the operating system type of the OS disk. Possible values include "Linux" and "Windows". + OSType string `json:"osType"` + // ManagedDisk specifies the Managed Disk parameters for the OS disk. + ManagedDisk ManagedDiskParameters `json:"managedDisk"` + // DiskSizeGB is the size in GB to assign to the data disk. + DiskSizeGB int32 `json:"diskSizeGB"` +} + +// ManagedDiskParameters is the parameters of a managed disk. +type ManagedDiskParameters struct { + // StorageAccountType is the storage account type to use. Possible values include "Standard_LRS" and "Premium_LRS". + StorageAccountType string `json:"storageAccountType"` + // DiskEncryptionSet is the disk encryption set properties + // +optional + DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` +} + +// DiskEncryptionSetParameters is the disk encryption set properties +type DiskEncryptionSetParameters struct { + // ID is the disk encryption set ID + // +optional + ID string `json:"id,omitempty"` +} + +// SecurityProfile specifies the Security profile settings for a +// virtual machine or virtual machine scale set. +type SecurityProfile struct { + // This field indicates whether Host Encryption should be enabled + // or disabled for a virtual machine or virtual machine scale + // set. Default is disabled. + // +optional + EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` +} + +// AzureMachineProviderCondition is a condition in a AzureMachineProviderStatus +type AzureMachineProviderCondition struct { + // Type is the type of the condition. + Type ConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go new file mode 100644 index 000000000..d1e5b7fbf --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go @@ -0,0 +1,175 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). 
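The Image fields above follow the Azure image-reference model (publisher/offer/sku/version, or a ResourceID for image-by-ID). A minimal sketch of a marketplace-style reference using the offer and SKU examples from the comments; the publisher value is an assumption, not taken from this file.

package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

func main() {
	// A marketplace image reference built from the fields documented above;
	// ResourceID would be set instead when referencing an image by ID.
	image := machinev1beta1.Image{
		Publisher: "MicrosoftWindowsServer", // assumed publisher, for illustration
		Offer:     "WindowsServer",
		SKU:       "2019-Datacenter",
		Version:   "latest", // resolved at deploy time, not auto-updated afterwards
	}
	fmt.Printf("%s:%s:%s:%s\n", image.Publisher, image.Offer, image.SKU, image.Version)
}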
+// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type GCPMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with GCP credentials. + // +optional + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + // CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. + // This is required if you plan to use this instance to forward routes. + CanIPForward bool `json:"canIPForward"` + // DeletionProtection whether the resource should be protected against deletion. + DeletionProtection bool `json:"deletionProtection"` + // Disks is a list of disks to be attached to the VM. + // +optional + Disks []*GCPDisk `json:"disks,omitempty"` + // Labels list of labels to apply to the VM. + // +optional + Labels map[string]string `json:"labels,omitempty"` + // Metadata key/value pairs to apply to the VM. + // +optional + Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"` + // NetworkInterfaces is a list of network interfaces to be attached to the VM. + // +optional + NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"` + // ServiceAccounts is a list of GCP service accounts to be used by the VM. + ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"` + // Tags list of tags to apply to the VM. + Tags []string `json:"tags,omitempty"` + // TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, + // an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool + // +optional + TargetPools []string `json:"targetPools,omitempty"` + // MachineType is the machine type to use for the VM. + MachineType string `json:"machineType"` + // Region is the region in which the GCP machine provider will create the VM. + Region string `json:"region"` + // Zone is the zone in which the GCP machine provider will create the VM. + Zone string `json:"zone"` + // ProjectID is the project in which the GCP machine provider will create the VM. + // +optional + ProjectID string `json:"projectID,omitempty"` + // Preemptible indicates if created instance is preemptible + // +optional + Preemptible bool `json:"preemptible,omitempty"` +} + +// GCPDisk describes disks for GCP. +type GCPDisk struct { + // AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false). + AutoDelete bool `json:"autoDelete"` + // Boot indicates if this is a boot disk (default false). + Boot bool `json:"boot"` + // SizeGB is the size of the disk (in GB). + SizeGB int64 `json:"sizeGb"` + // Type is the type of the disk (eg: pd-standard). + Type string `json:"type"` + // Image is the source image to create this disk. + Image string `json:"image"` + // Labels list of labels to apply to the disk. + Labels map[string]string `json:"labels"` + // EncryptionKey is the customer-supplied encryption key of the disk. + // +optional + EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"` +} + +// GCPMetadata describes metadata for GCP. +type GCPMetadata struct { + // Key is the metadata key. + Key string `json:"key"` + // Value is the metadata value. 
+ Value *string `json:"value"` +} + +// GCPNetworkInterface describes network interfaces for GCP +type GCPNetworkInterface struct { + // PublicIP indicates if true a public IP will be used + PublicIP bool `json:"publicIP,omitempty"` + // Network is the network name. + Network string `json:"network,omitempty"` + // ProjectID is the project in which the GCP machine provider will create the VM. + ProjectID string `json:"projectID,omitempty"` + // Subnetwork is the subnetwork name. + Subnetwork string `json:"subnetwork,omitempty"` +} + +// GCPServiceAccount describes service accounts for GCP. +type GCPServiceAccount struct { + // Email is the service account email. + Email string `json:"email"` + // Scopes list of scopes to be assigned to the service account. + Scopes []string `json:"scopes"` +} + +// GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption. +type GCPEncryptionKeyReference struct { + // KMSKeyName is the reference KMS key, in the format + // +optional + KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` + // KMSKeyServiceAccount is the service account being used for the + // encryption request for the given KMS key. If absent, the Compute + // Engine default service account is used. + // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account + // for details on the default service account. + // +optional + KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"` +} + +// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key +type GCPKMSKeyReference struct { + // Name is the name of the customer managed encryption key to be used for the disk encryption. + Name string `json:"name"` + // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to. + KeyRing string `json:"keyRing"` + // ProjectID is the ID of the Project in which the KMS Key Ring exists. + // Defaults to the VM ProjectID if not set. + // +optional + ProjectID string `json:"projectID,omitempty"` + // Location is the GCP location in which the Key Ring exists. + Location string `json:"location"` +} + +// GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains GCP-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type GCPMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // InstanceID is the ID of the instance in GCP + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the provisioning state of the GCP Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []GCPMachineProviderCondition `json:"conditions,omitempty"` +} + +// GCPMachineProviderCondition is a condition in a GCPMachineProviderStatus +type GCPMachineProviderCondition struct { + // Type is the type of the condition. + Type ConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. 
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go similarity index 61% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go rename to vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index a38c8909c..73c33e44d 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_types.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -1,30 +1,14 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package v1beta1 import ( - "fmt" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" ) +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + const ( // MachineFinalizer is set on PrepareForCreate callback. MachineFinalizer = "machine.machine.openshift.io" @@ -37,6 +21,120 @@ const ( MachineClusterIDLabel = "machine.openshift.io/cluster-api-cluster" ) +type MachineStatusError string + +const ( + // Represents that the combination of configuration in the MachineSpec + // is not supported by this cluster. This is not a transient error, but + // indicates a state that must be fixed before progress can be made. + // + // Example: the ProviderSpec specifies an instance type that doesn't exist, + InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" + + // This indicates that the MachineSpec has been updated in a way that + // is not supported for reconciliation on this cluster. The spec may be + // completely valid from a configuration standpoint, but the controller + // does not support changing the real world state to match the new + // spec. + // + // Example: the responsible controller is not capable of changing the + // container runtime from docker to rkt. + UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" + + // This generally refers to exceeding one's quota in a cloud provider, + // or running out of physical machines in an on-premise environment. + InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" + + // There was an error while trying to create a Node to match this + // Machine. This may indicate a transient problem that will be fixed + // automatically with time, such as a service outage, or a terminal + // error during creation that doesn't match a more specific + // MachineStatusError value. + // + // Example: timeout trying to connect to GCE. 
+ CreateMachineError MachineStatusError = "CreateError" + + // There was an error while trying to update a Node that this + // Machine represents. This may indicate a transient problem that will be + // fixed automatically with time, such as a service outage, + // + // Example: error updating load balancers + UpdateMachineError MachineStatusError = "UpdateError" + + // An error was encountered while trying to delete the Node that this + // Machine represents. This could be a transient or terminal error, but + // will only be observable if the provider's Machine controller has + // added a finalizer to the object to more gracefully handle deletions. + // + // Example: cannot resolve EC2 IP address. + DeleteMachineError MachineStatusError = "DeleteError" + + // TemplateClonedFromGroupKindAnnotation is the infrastructure machine + // annotation that stores the group-kind of the infrastructure template resource + // that was cloned for the machine. This annotation is set only during cloning a + // template. Older/adopted machines will not have this annotation. + TemplateClonedFromGroupKindAnnotation = "machine.openshift.io/cloned-from-groupkind" + + // TemplateClonedFromNameAnnotation is the infrastructure machine annotation that + // stores the name of the infrastructure template resource + // that was cloned for the machine. This annotation is set only during cloning a + // template. Older/adopted machines will not have this annotation. + TemplateClonedFromNameAnnotation = "machine.openshift.io/cloned-from-name" + + // This error indicates that the machine did not join the cluster + // as a new node within the expected timeframe after instance + // creation at the provider succeeded + // + // Example use case: A controller that deletes Machines which do + // not result in a Node joining the cluster within a given timeout + // and that are managed by a MachineSet + JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" +) + +type ClusterStatusError string + +const ( + // InvalidConfigurationClusterError indicates that the cluster + // configuration is invalid. + InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" + + // UnsupportedChangeClusterError indicates that the cluster + // spec has been updated in an unsupported way. That cannot be + // reconciled. + UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" + + // CreateClusterError indicates that an error was encountered + // when trying to create the cluster. + CreateClusterError ClusterStatusError = "CreateError" + + // UpdateClusterError indicates that an error was encountered + // when trying to update the cluster. + UpdateClusterError ClusterStatusError = "UpdateError" + + // DeleteClusterError indicates that an error was encountered + // when trying to delete the cluster. + DeleteClusterError ClusterStatusError = "DeleteError" +) + +type MachineSetStatusError string + +const ( + // Represents that the combination of configuration in the MachineTemplateSpec + // is not supported by this cluster. This is not a transient error, but + // indicates a state that must be fixed before progress can be made. + // + // Example: the ProviderSpec specifies an instance type that doesn't exist. + InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" +) + +type MachineDeploymentStrategyType string + +const ( + // Replace the old MachineSet by new one using rolling update + // i.e. gradually scale down the old MachineSet and scale up the new one. 
+ RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" +) + // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -51,6 +149,8 @@ const ( // +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeRef.name",description="Node associated with machine",priority=1 // +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID of machine created in cloud provider",priority=1 // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\\.openshift\\.io/instance-state']",description="State of instance",priority=1 +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 type Machine struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -59,14 +159,6 @@ type Machine struct { Status MachineStatus `json:"status,omitempty"` } -func (m *Machine) GetConditions() Conditions { - return m.Status.Conditions -} - -func (m *Machine) SetConditions(conditions Conditions) { - m.Status.Conditions = conditions -} - // MachineSpec defines the desired state of Machine type MachineSpec struct { // ObjectMeta will autopopulate the Node created. Use this to @@ -195,26 +287,11 @@ type LastOperation struct { Type *string `json:"type,omitempty"` } -func (m *Machine) Validate() field.ErrorList { - errors := field.ErrorList{} - - // validate spec.labels - fldPath := field.NewPath("spec") - if m.Labels[MachineClusterIDLabel] == "" { - errors = append(errors, field.Invalid(fldPath.Child("labels"), m.Labels, fmt.Sprintf("missing %v label.", MachineClusterIDLabel))) - } - - // validate provider config is set - if m.Spec.ProviderSpec.Value == nil { - errors = append(errors, field.Invalid(fldPath.Child("spec").Child("providerspec"), m.Spec.ProviderSpec, "value field must be set")) - } - - return errors -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MachineList contains a list of Machine +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=2 type MachineList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go similarity index 91% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go rename to vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go index 4dd2b7a56..bb79f725c 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machinehealthcheck_types.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -19,28 +19,26 @@ type RemediationStrategyType string // +kubebuilder:printcolumn:name="MaxUnhealthy",type="string",JSONPath=".spec.maxUnhealthy",description="Maximum number of unhealthy machines allowed" // +kubebuilder:printcolumn:name="ExpectedMachines",type="integer",JSONPath=".status.expectedMachines",description="Number of machines currently monitored" // +kubebuilder:printcolumn:name="CurrentHealthy",type="integer",JSONPath=".status.currentHealthy",description="Current observed healthy machines" +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 type MachineHealthCheck struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of machine health check policy + // +optional Spec MachineHealthCheckSpec `json:"spec,omitempty"` // Most recently observed status of MachineHealthCheck resource + // +optional Status MachineHealthCheckStatus `json:"status,omitempty"` } -func (m *MachineHealthCheck) GetConditions() Conditions { - return m.Status.Conditions -} - -func (m *MachineHealthCheck) SetConditions(conditions Conditions) { - m.Status.Conditions = conditions -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MachineHealthCheckList contains a list of MachineHealthCheck +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 type MachineHealthCheckList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` @@ -66,8 +64,9 @@ type MachineHealthCheckSpec struct { // Percentage values must be positive whole numbers and are capped at 100%. // Both 0 and 0% are valid and will block all remediation. 
// +kubebuilder:default:="100%" + // +kubebuilder:validation:XIntOrString // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" - // +kubebuilder:validation:Type:=string + // +optional MaxUnhealthy *intstr.IntOrString `json:"maxUnhealthy,omitempty"` // Machines older than this duration without a node will be considered to have @@ -81,6 +80,7 @@ type MachineHealthCheckSpec struct { // +kubebuilder:default:="10m" // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" // +kubebuilder:validation:Type:=string + // +optional NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"` // RemediationTemplate is a reference to a remediation template @@ -121,7 +121,7 @@ type MachineHealthCheckStatus struct { // total number of machines counted by this machine health check // +kubebuilder:validation:Minimum=0 - CurrentHealthy *int `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` + CurrentHealthy *int `json:"currentHealthy"` // RemediationsAllowed is the number of further remediations allowed by this machine health check before // maxUnhealthy short circuiting will be applied @@ -130,5 +130,6 @@ type MachineHealthCheckStatus struct { RemediationsAllowed int32 `json:"remediationsAllowed"` // Conditions defines the current state of the MachineHealthCheck + // +optional Conditions Conditions `json:"conditions,omitempty"` } diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go similarity index 75% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go rename to vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index 2dd562b4e..bbc6b736b 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_types.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -1,29 +1,7 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - package v1beta1 import ( - "log" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/validation/field" ) // +genclient @@ -38,6 +16,8 @@ import ( // +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas" // +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machineset age" +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=2 type MachineSet struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -53,23 +33,19 @@ type MachineSetSpec struct { // Defaults to 1. // +kubebuilder:default=1 Replicas *int32 `json:"replicas,omitempty"` - // MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. // Defaults to 0 (machine will be considered available as soon as it is ready) // +optional MinReadySeconds int32 `json:"minReadySeconds,omitempty"` - // DeletePolicy defines the policy used to identify nodes to delete when downscaling. // Defaults to "Random". Valid values are "Random, "Newest", "Oldest" // +kubebuilder:validation:Enum=Random;Newest;Oldest DeletePolicy string `json:"deletePolicy,omitempty"` - // Selector is a label query over machines that should match the replica count. // Label keys and values that must match in order to be controlled by this MachineSet. // It must match the machine template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors Selector metav1.LabelSelector `json:"selector"` - // Template is the object that describes the machine that will be created if // insufficient replicas are detected. // +optional @@ -86,13 +62,11 @@ const ( // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). // Finally, it picks Machines at random to delete. RandomMachineSetDeletePolicy MachineSetDeletePolicy = "Random" - // NewestMachineSetDeletePolicy prioritizes both Machines that have the annotation // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). // It then prioritizes the newest Machines for deletion based on the Machine's CreationTimestamp. NewestMachineSetDeletePolicy MachineSetDeletePolicy = "Newest" - // OldestMachineSetDeletePolicy prioritizes both Machines that have the annotation // "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy // (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value). @@ -106,7 +80,6 @@ type MachineTemplateSpec struct { // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional ObjectMeta `json:"metadata,omitempty"` - // Specification of the desired behavior of the machine. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +optional @@ -117,23 +90,18 @@ type MachineTemplateSpec struct { type MachineSetStatus struct { // Replicas is the most recently observed number of replicas. Replicas int32 `json:"replicas"` - // The number of replicas that have labels matching the labels of the machine template of the MachineSet. // +optional FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"` - // The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty"` - // The number of available replicas (ready for at least minReadySeconds) for this MachineSet. // +optional AvailableReplicas int32 `json:"availableReplicas,omitempty"` - // ObservedGeneration reflects the generation of the most recently observed MachineSet. 
// +optional ObservedGeneration int64 `json:"observedGeneration,omitempty"` - // In the event that there is a terminal problem reconciling the // replicas, both ErrorReason and ErrorMessage will be set. ErrorReason // will be populated with a succinct value suitable for machine @@ -158,51 +126,11 @@ type MachineSetStatus struct { ErrorMessage *string `json:"errorMessage,omitempty"` } -func (m *MachineSet) Validate() field.ErrorList { - errors := field.ErrorList{} - - // validate spec.selector and spec.template.labels - fldPath := field.NewPath("spec") - errors = append(errors, metav1validation.ValidateLabelSelector(&m.Spec.Selector, fldPath.Child("selector"))...) - if len(m.Spec.Selector.MatchLabels)+len(m.Spec.Selector.MatchExpressions) == 0 { - errors = append(errors, field.Invalid(fldPath.Child("selector"), m.Spec.Selector, "empty selector is not valid for MachineSet.")) - } - selector, err := metav1.LabelSelectorAsSelector(&m.Spec.Selector) - if err != nil { - errors = append(errors, field.Invalid(fldPath.Child("selector"), m.Spec.Selector, "invalid label selector.")) - } else { - labels := labels.Set(m.Spec.Template.Labels) - if !selector.Matches(labels) { - errors = append(errors, field.Invalid(fldPath.Child("template", "metadata", "labels"), m.Spec.Template.Labels, "`selector` does not match template `labels`")) - } - } - - return errors -} - -// DefaultingFunction sets default MachineSet field values -func (m *MachineSet) Default() { - log.Printf("Defaulting fields for MachineSet %s\n", m.Name) - - if m.Spec.Replicas == nil { - m.Spec.Replicas = new(int32) - *m.Spec.Replicas = 1 - } - - if len(m.Namespace) == 0 { - m.Namespace = metav1.NamespaceDefault - } - - if m.Spec.DeletePolicy == "" { - randomPolicy := string(RandomMachineSetDeletePolicy) - log.Printf("Defaulting to %s\n", randomPolicy) - m.Spec.DeletePolicy = randomPolicy - } -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MachineSetList contains a list of MachineSet +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 type MachineSetList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go new file mode 100644 index 000000000..10bb715da --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go @@ -0,0 +1,207 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ProviderSpec defines the configuration to use during node creation. +type ProviderSpec struct { + + // No more than one of the following may be specified. + + // Value is an inlined, serialized representation of the resource + // configuration. It is recommended that providers maintain their own + // versioned API types that should be serialized/deserialized from this + // field, akin to component config. + // +optional + // +kubebuilder:validation:XPreserveUnknownFields + Value *runtime.RawExtension `json:"value,omitempty"` +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. This is a copy of customizable fields from metav1.ObjectMeta. 
+// +// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, +// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases +// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies +// the API and some issues that can impact user experience. +// +// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) +// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, +// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`. +// The investigation showed that `controller-tools@v2` behaves differently than its previous version +// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package. +// +// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` +// had validation properties, including for `creationTimestamp` (metav1.Time). +// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` +// which breaks validation because the field isn't marked as nullable. +// +// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded +// types. When that happens, this hack should be revisited. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. 
May match selectors of replication controllers
+ // and services.
+ // More info: http://kubernetes.io/docs/user-guide/labels
+ // +optional
+ Labels map[string]string `json:"labels,omitempty"`
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: http://kubernetes.io/docs/user-guide/annotations
+ // +optional
+ Annotations map[string]string `json:"annotations,omitempty"`
+
+ // List of objects depended by this object. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ // +optional
+ // +patchMergeKey=uid
+ // +patchStrategy=merge
+ OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"`
+}
+
+// ConditionSeverity expresses the severity of a Condition Type failing.
+type ConditionSeverity string
+
+const (
+ // ConditionSeverityError specifies that a condition with `Status=False` is an error.
+ ConditionSeverityError ConditionSeverity = "Error"
+
+ // ConditionSeverityWarning specifies that a condition with `Status=False` is a warning.
+ ConditionSeverityWarning ConditionSeverity = "Warning"
+
+ // ConditionSeverityInfo specifies that a condition with `Status=False` is informative.
+ ConditionSeverityInfo ConditionSeverity = "Info"
+
+ // ConditionSeverityNone should apply only to conditions with `Status=True`.
+ ConditionSeverityNone ConditionSeverity = ""
+)
+
+// ConditionType is a valid value for Condition.Type.
+type ConditionType string
+
+// Valid conditions for a machine.
+const (
+ // MachineCreated indicates whether the machine has been created or not. If not,
+ // it should include a reason and message for the failure.
+ // NOTE: MachineCreation is here for historical reasons, MachineCreated should be used instead
+ MachineCreation ConditionType = "MachineCreation"
+ // MachineCreated indicates whether the machine has been created or not. If not,
+ // it should include a reason and message for the failure.
+ MachineCreated ConditionType = "MachineCreated"
+ // InstanceExistsCondition is set on the Machine to show whether a virtual machine has been created by the cloud provider.
+ InstanceExistsCondition ConditionType = "InstanceExists"
+ // RemediationAllowedCondition is set on MachineHealthChecks to show the status of whether the MachineHealthCheck is
+ // allowed to remediate any Machines or whether it is blocked from remediating any further.
+ RemediationAllowedCondition ConditionType = "RemediationAllowed"
+ // ExternalRemediationTemplateAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation.
+ // ExternalRemediationTemplateAvailable is set to false if external remediation template is not found.
+ ExternalRemediationTemplateAvailable ConditionType = "ExternalRemediationTemplateAvailable"
+ // ExternalRemediationRequestAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation.
+ // ExternalRemediationRequestAvailable is set to false if creating external remediation request fails.
+ ExternalRemediationRequestAvailable ConditionType = "ExternalRemediationRequestAvailable" +) + +const ( + // MachineCreationSucceeded indicates machine creation success. + MachineCreationSucceededConditionReason string = "MachineCreationSucceeded" + // MachineCreationFailed indicates machine creation failure. + MachineCreationFailedConditionReason string = "MachineCreationFailed" + // ErrorCheckingProviderReason is the reason used when the exist operation fails. + // This would normally be because we cannot contact the provider. + ErrorCheckingProviderReason = "ErrorCheckingProvider" + // InstanceMissingReason is the reason used when the machine was provisioned, but the instance has gone missing. + InstanceMissingReason = "InstanceMissing" + // InstanceNotCreatedReason is the reason used when the machine has not yet been provisioned. + InstanceNotCreatedReason = "InstanceNotCreated" + // TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked + // from making any further remediations. + TooManyUnhealthyReason = "TooManyUnhealthy" + // ExternalRemediationTemplateNotFound is the reason used when a machine health check fails to find external remediation template. + ExternalRemediationTemplateNotFound = "ExternalRemediationTemplateNotFound" + // ExternalRemediationRequestCreationFailed is the reason used when a machine health check fails to create external remediation request. + ExternalRemediationRequestCreationFailed = "ExternalRemediationRequestCreationFailed" +) + +// Condition defines an observation of a Machine API resource operational state. +type Condition struct { + // Type of condition in CamelCase or in foo.example.com/CamelCase. + // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + // can be useful (see .node.status.conditions), the ability to deconflict is important. + // +required + Type ConditionType `json:"type"` + + // Status of the condition, one of True, False, Unknown. + // +required + Status corev1.ConditionStatus `json:"status"` + + // Severity provides an explicit classification of Reason code, so the users or machines can immediately + // understand the current situation and act accordingly. + // The Severity field MUST be set only when Status=False. + // +optional + Severity ConditionSeverity `json:"severity,omitempty"` + + // Last time the condition transitioned from one status to another. + // This should be when the underlying condition changed. If that is not known, then using the time when + // the API field changed is acceptable. + // +required + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + + // The reason for the condition's last transition in CamelCase. + // The specific API may choose whether or not this field is considered a guaranteed API. + // This field may not be empty. + // +optional + Reason string `json:"reason,omitempty"` + + // A human readable message indicating details about the transition. + // This field may be empty. + // +optional + Message string `json:"message,omitempty"` +} + +// Conditions provide observations of the operational state of a Machine API resource. 
+type Conditions []Condition diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderconfig_types.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go similarity index 66% rename from vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderconfig_types.go rename to vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go index 68682cd1d..9d28632f8 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderconfig_types.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go @@ -5,32 +5,30 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field // for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. -// +k8s:openapi-gen=true +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type VSphereMachineProviderSpec struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + // +optional metav1.ObjectMeta `json:"metadata,omitempty"` - // UserDataSecret contains a local reference to a secret that contains the // UserData to apply to the instance + // +optional UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - // CredentialsSecret is a reference to the secret with vSphere credentials. + // +optional CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - // Template is the name, inventory path, or instance UUID of the template // used to clone new machines. Template string `json:"template"` - + // Workspace describes the workspace to use for the machine. + // +optional Workspace *Workspace `json:"workspace,omitempty"` - // Network is the network configuration for this machine's VM. Network NetworkSpec `json:"network"` - // NumCPUs is the number of virtual processors in a virtual machine. // Defaults to the analogue property value in the template from which this // machine is cloned. @@ -55,7 +53,6 @@ type VSphereMachineProviderSpec struct { // Snapshot is the name of the snapshot from which the VM was cloned // +optional Snapshot string `json:"snapshot"` - // CloneMode specifies the type of clone operation. // The LinkedClone mode is only support for templates that have at least // one snapshot. If the template has no snapshots, then CloneMode defaults @@ -76,7 +73,6 @@ const ( // clone operation once the operation is complete. This is the safest clone // mode, but it is not the fastest. FullClone CloneMode = "fullClone" - // LinkedClone means resulting VMs will be dependent upon the snapshot of // the source VM/template from which the VM was cloned. This is the fastest // clone mode, but it also prevents expanding a VMs disk beyond the size of @@ -86,6 +82,7 @@ const ( // NetworkSpec defines the virtual machine's network configuration. type NetworkSpec struct { + // Devices defines the virtual machine's network interfaces. Devices []NetworkDeviceSpec `json:"devices"` } @@ -103,26 +100,59 @@ type Workspace struct { // Server is the IP address or FQDN of the vSphere endpoint. 
// +optional Server string `gcfg:"server,omitempty" json:"server,omitempty"` - // Datacenter is the datacenter in which VMs are created/located. // +optional Datacenter string `gcfg:"datacenter,omitempty" json:"datacenter,omitempty"` - // Folder is the folder in which VMs are created/located. // +optional Folder string `gcfg:"folder,omitempty" json:"folder,omitempty"` - // Datastore is the datastore in which VMs are created/located. // +optional Datastore string `gcfg:"default-datastore,omitempty" json:"datastore,omitempty"` - // ResourcePool is the resource pool in which VMs are created/located. // +optional ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// VSphereMachineProviderCondition is a condition in a VSphereMachineProviderStatus. +type VSphereMachineProviderCondition struct { + // Type is the type of the condition. + Type ConditionType `json:"type"` + // Status is the status of the condition. + Status corev1.ConditionStatus `json:"status"` + // LastProbeTime is the last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` + // LastTransitionTime is the last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + // Reason is a unique, one-word, CamelCase reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Message is a human-readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains VSphere-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type VSphereMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` -func init() { - SchemeBuilder.Register(&VSphereMachineProviderSpec{}) + // InstanceID is the ID of the instance in VSphere + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the provisioning state of the VSphere Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + Conditions []VSphereMachineProviderCondition `json:"conditions,omitempty"` + // TaskRef is a managed object reference to a Task related to the machine. + // This value is set automatically at runtime and should not be set or + // modified by users. + // +optional + TaskRef string `json:"taskRef,omitempty"` } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..fb98815aa --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1585 @@ +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AWSMachineProviderCondition) DeepCopyInto(out *AWSMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderCondition. +func (in *AWSMachineProviderCondition) DeepCopy() *AWSMachineProviderCondition { + if in == nil { + return nil + } + out := new(AWSMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.AMI.DeepCopyInto(&out.AMI) + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagSpecification, len(*in)) + copy(*out, *in) + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(AWSResourceReference) + (*in).DeepCopyInto(*out) + } + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]AWSResourceReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Subnet.DeepCopyInto(&out.Subnet) + out.Placement = in.Placement + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]LoadBalancerReference, len(*in)) + copy(*out, *in) + } + if in.BlockDevices != nil { + in, out := &in.BlockDevices, &out.BlockDevices + *out = make([]BlockDeviceMappingSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpotMarketOptions != nil { + in, out := &in.SpotMarketOptions, &out.SpotMarketOptions + *out = new(SpotMarketOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfig. +func (in *AWSMachineProviderConfig) DeepCopy() *AWSMachineProviderConfig { + if in == nil { + return nil + } + out := new(AWSMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachineProviderConfigList) DeepCopyInto(out *AWSMachineProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSMachineProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfigList. +func (in *AWSMachineProviderConfigList) DeepCopy() *AWSMachineProviderConfigList { + if in == nil { + return nil + } + out := new(AWSMachineProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AWSMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderStatus. +func (in *AWSMachineProviderStatus) DeepCopy() *AWSMachineProviderStatus { + if in == nil { + return nil + } + out := new(AWSMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]Filter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. +func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { + if in == nil { + return nil + } + out := new(AWSResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachineProviderCondition) DeepCopyInto(out *AzureMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderCondition. +func (in *AzureMachineProviderCondition) DeepCopy() *AzureMachineProviderCondition { + if in == nil { + return nil + } + out := new(AzureMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMachineProviderSpec) DeepCopyInto(out *AzureMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.SecretReference) + **out = **in + } + out.Image = in.Image + in.OSDisk.DeepCopyInto(&out.OSDisk) + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ApplicationSecurityGroups != nil { + in, out := &in.ApplicationSecurityGroups, &out.ApplicationSecurityGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NatRule != nil { + in, out := &in.NatRule, &out.NatRule + *out = new(int64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.SpotVMOptions != nil { + in, out := &in.SpotVMOptions, &out.SpotVMOptions + *out = new(SpotVMOptions) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SecurityProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderSpec. +func (in *AzureMachineProviderSpec) DeepCopy() *AzureMachineProviderSpec { + if in == nil { + return nil + } + out := new(AzureMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachineProviderStatus) DeepCopyInto(out *AzureMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.VMID != nil { + in, out := &in.VMID, &out.VMID + *out = new(string) + **out = **in + } + if in.VMState != nil { + in, out := &in.VMState, &out.VMState + *out = new(AzureVMState) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]AzureMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderStatus. +func (in *AzureMachineProviderStatus) DeepCopy() *AzureMachineProviderStatus { + if in == nil { + return nil + } + out := new(AzureMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlockDeviceMappingSpec) DeepCopyInto(out *BlockDeviceMappingSpec) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSBlockDeviceSpec) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingSpec. +func (in *BlockDeviceMappingSpec) DeepCopy() *BlockDeviceMappingSpec { + if in == nil { + return nil + } + out := new(BlockDeviceMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetParameters. +func (in *DiskEncryptionSetParameters) DeepCopy() *DiskEncryptionSetParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + in.KMSKey.DeepCopyInto(&out.KMSKey) + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(int64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(int64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceSpec. 
+func (in *EBSBlockDeviceSpec) DeepCopy() *EBSBlockDeviceSpec { + if in == nil { + return nil + } + out := new(EBSBlockDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(GCPEncryptionKeyReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. +func (in *GCPDisk) DeepCopy() *GCPDisk { + if in == nil { + return nil + } + out := new(GCPDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPEncryptionKeyReference) DeepCopyInto(out *GCPEncryptionKeyReference) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEncryptionKeyReference. +func (in *GCPEncryptionKeyReference) DeepCopy() *GCPEncryptionKeyReference { + if in == nil { + return nil + } + out := new(GCPEncryptionKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMachineProviderCondition) DeepCopyInto(out *GCPMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderCondition. +func (in *GCPMachineProviderCondition) DeepCopy() *GCPMachineProviderCondition { + if in == nil { + return nil + } + out := new(GCPMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]*GCPDisk, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPDisk) + (*in).DeepCopyInto(*out) + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]*GCPMetadata, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPMetadata) + (*in).DeepCopyInto(*out) + } + } + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]*GCPNetworkInterface, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPNetworkInterface) + **out = **in + } + } + } + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = make([]GCPServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TargetPools != nil { + in, out := &in.TargetPools, &out.TargetPools + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderSpec. +func (in *GCPMachineProviderSpec) DeepCopy() *GCPMachineProviderSpec { + if in == nil { + return nil + } + out := new(GCPMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCPMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMachineProviderStatus) DeepCopyInto(out *GCPMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]GCPMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderStatus. +func (in *GCPMachineProviderStatus) DeepCopy() *GCPMachineProviderStatus { + if in == nil { + return nil + } + out := new(GCPMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata. +func (in *GCPMetadata) DeepCopy() *GCPMetadata { + if in == nil { + return nil + } + out := new(GCPMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface. +func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface { + if in == nil { + return nil + } + out := new(GCPNetworkInterface) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount. +func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount { + if in == nil { + return nil + } + out := new(GCPServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. +func (in *LastOperation) DeepCopy() *LastOperation { + if in == nil { + return nil + } + out := new(LastOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference. +func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference { + if in == nil { + return nil + } + out := new(LoadBalancerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Machine) DeepCopyInto(out *Machine) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. +func (in *Machine) DeepCopy() *Machine { + if in == nil { + return nil + } + out := new(Machine) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Machine) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineHealthCheck) DeepCopyInto(out *MachineHealthCheck) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheck. +func (in *MachineHealthCheck) DeepCopy() *MachineHealthCheck { + if in == nil { + return nil + } + out := new(MachineHealthCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineHealthCheck) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineHealthCheckList) DeepCopyInto(out *MachineHealthCheckList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineHealthCheck, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckList. +func (in *MachineHealthCheckList) DeepCopy() *MachineHealthCheckList { + if in == nil { + return nil + } + out := new(MachineHealthCheckList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineHealthCheckList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + if in.UnhealthyConditions != nil { + in, out := &in.UnhealthyConditions, &out.UnhealthyConditions + *out = make([]UnhealthyCondition, len(*in)) + copy(*out, *in) + } + if in.MaxUnhealthy != nil { + in, out := &in.MaxUnhealthy, &out.MaxUnhealthy + *out = new(intstr.IntOrString) + **out = **in + } + if in.NodeStartupTimeout != nil { + in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout + *out = new(metav1.Duration) + **out = **in + } + if in.RemediationTemplate != nil { + in, out := &in.RemediationTemplate, &out.RemediationTemplate + *out = new(v1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec. +func (in *MachineHealthCheckSpec) DeepCopy() *MachineHealthCheckSpec { + if in == nil { + return nil + } + out := new(MachineHealthCheckSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineHealthCheckStatus) DeepCopyInto(out *MachineHealthCheckStatus) { + *out = *in + if in.ExpectedMachines != nil { + in, out := &in.ExpectedMachines, &out.ExpectedMachines + *out = new(int) + **out = **in + } + if in.CurrentHealthy != nil { + in, out := &in.CurrentHealthy, &out.CurrentHealthy + *out = new(int) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckStatus. +func (in *MachineHealthCheckStatus) DeepCopy() *MachineHealthCheckStatus { + if in == nil { + return nil + } + out := new(MachineHealthCheckStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineList) DeepCopyInto(out *MachineList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Machine, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. +func (in *MachineList) DeepCopy() *MachineList { + if in == nil { + return nil + } + out := new(MachineList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSet) DeepCopyInto(out *MachineSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. 
+func (in *MachineSet) DeepCopy() *MachineSet { + if in == nil { + return nil + } + out := new(MachineSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MachineSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. +func (in *MachineSetList) DeepCopy() *MachineSetList { + if in == nil { + return nil + } + out := new(MachineSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MachineSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. +func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { + if in == nil { + return nil + } + out := new(MachineSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { + *out = *in + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(MachineSetStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. +func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { + if in == nil { + return nil + } + out := new(MachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) + if in.ProviderID != nil { + in, out := &in.ProviderID, &out.ProviderID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. 
+func (in *MachineSpec) DeepCopy() *MachineSpec { + if in == nil { + return nil + } + out := new(MachineSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { + *out = *in + if in.NodeRef != nil { + in, out := &in.NodeRef, &out.NodeRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.ErrorReason != nil { + in, out := &in.ErrorReason, &out.ErrorReason + *out = new(MachineStatusError) + **out = **in + } + if in.ErrorMessage != nil { + in, out := &in.ErrorMessage, &out.ErrorMessage + *out = new(string) + **out = **in + } + if in.ProviderStatus != nil { + in, out := &in.ProviderStatus, &out.ProviderStatus + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]v1.NodeAddress, len(*in)) + copy(*out, *in) + } + if in.LastOperation != nil { + in, out := &in.LastOperation, &out.LastOperation + *out = new(LastOperation) + (*in).DeepCopyInto(*out) + } + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. +func (in *MachineStatus) DeepCopy() *MachineStatus { + if in == nil { + return nil + } + out := new(MachineStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. +func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { + if in == nil { + return nil + } + out := new(MachineTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedDiskParameters) DeepCopyInto(out *ManagedDiskParameters) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(DiskEncryptionSetParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskParameters. +func (in *ManagedDiskParameters) DeepCopy() *ManagedDiskParameters { + if in == nil { + return nil + } + out := new(ManagedDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec. 
+func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec { + if in == nil { + return nil + } + out := new(NetworkDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.Devices != nil { + in, out := &in.Devices, &out.Devices + *out = make([]NetworkDeviceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OSDisk) DeepCopyInto(out *OSDisk) { + *out = *in + in.ManagedDisk.DeepCopyInto(&out.ManagedDisk) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. +func (in *OSDisk) DeepCopy() *OSDisk { + if in == nil { + return nil + } + out := new(OSDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]metav1.OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Placement) DeepCopyInto(out *Placement) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. +func (in *Placement) DeepCopy() *Placement { + if in == nil { + return nil + } + out := new(Placement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. +func (in *ProviderSpec) DeepCopy() *ProviderSpec { + if in == nil { + return nil + } + out := new(ProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) { + *out = *in + if in.EncryptionAtHost != nil { + in, out := &in.EncryptionAtHost, &out.EncryptionAtHost + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfile. +func (in *SecurityProfile) DeepCopy() *SecurityProfile { + if in == nil { + return nil + } + out := new(SecurityProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) { + *out = *in + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions. +func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions { + if in == nil { + return nil + } + out := new(SpotMarketOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SpotVMOptions) DeepCopyInto(out *SpotVMOptions) { + *out = *in + if in.MaxPrice != nil { + in, out := &in.MaxPrice, &out.MaxPrice + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotVMOptions. +func (in *SpotVMOptions) DeepCopy() *SpotVMOptions { + if in == nil { + return nil + } + out := new(SpotVMOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagSpecification) DeepCopyInto(out *TagSpecification) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecification. +func (in *TagSpecification) DeepCopy() *TagSpecification { + if in == nil { + return nil + } + out := new(TagSpecification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) { + *out = *in + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyCondition. +func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition { + if in == nil { + return nil + } + out := new(UnhealthyCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereMachineProviderCondition) DeepCopyInto(out *VSphereMachineProviderCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderCondition. +func (in *VSphereMachineProviderCondition) DeepCopy() *VSphereMachineProviderCondition { + if in == nil { + return nil + } + out := new(VSphereMachineProviderCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Workspace != nil { + in, out := &in.Workspace, &out.Workspace + *out = new(Workspace) + **out = **in + } + in.Network.DeepCopyInto(&out.Network) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderSpec. +func (in *VSphereMachineProviderSpec) DeepCopy() *VSphereMachineProviderSpec { + if in == nil { + return nil + } + out := new(VSphereMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VSphereMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]VSphereMachineProviderCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderStatus. +func (in *VSphereMachineProviderStatus) DeepCopy() *VSphereMachineProviderStatus { + if in == nil { + return nil + } + out := new(VSphereMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..6b569f4aa --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,665 @@ +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AWSMachineProviderCondition = map[string]string{ + "": "AWSMachineProviderCondition is a condition in an AWSMachineProviderStatus.", + "type": "Type is the type of the condition.", + "status": "Status is the status of the condition.", + "lastProbeTime": "LastProbeTime is the last time we probed the condition.", + "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", + "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Message is a human-readable message indicating details about last transition.", +} + +func (AWSMachineProviderCondition) SwaggerDoc() map[string]string { + return map_AWSMachineProviderCondition +} + +var map_AWSMachineProviderConfig = map[string]string{ + "": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "ami": "AMI is the reference to the AMI from which to create the machine instance.", + "instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge", + "tags": "Tags is the set of tags to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.", + "iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.", + "keyName": "KeyName is the name of the KeyPair to use for SSH", + "deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.", + "publicIp": "PublicIP specifies whether the instance should get a public IP. 
If not present, it should use the default of its subnet.", + "securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.", + "subnet": "Subnet is a reference to the subnet to use for this instance", + "placement": "Placement specifies where to create the instance in AWS", + "loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.", + "blockDevices": "BlockDevices is the set of block device mapping associated to this instance, block device without a name will be used as a root device and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html", + "spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.", +} + +func (AWSMachineProviderConfig) SwaggerDoc() map[string]string { + return map_AWSMachineProviderConfig +} + +var map_AWSMachineProviderConfigList = map[string]string{ + "": "AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (AWSMachineProviderConfigList) SwaggerDoc() map[string]string { + return map_AWSMachineProviderConfigList +} + +var map_AWSMachineProviderStatus = map[string]string{ + "": "AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains AWS-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the instance ID of the machine created in AWS", + "instanceState": "InstanceState is the state of the AWS instance for this machine", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", +} + +func (AWSMachineProviderStatus) SwaggerDoc() map[string]string { + return map_AWSMachineProviderStatus +} + +var map_AWSResourceReference = map[string]string{ + "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.", + "id": "ID of resource", + "arn": "ARN of resource", + "filters": "Filters is a set of filters used to identify a resource", +} + +func (AWSResourceReference) SwaggerDoc() map[string]string { + return map_AWSResourceReference +} + +var map_BlockDeviceMappingSpec = map[string]string{ + "": "BlockDeviceMappingSpec describes a block device mapping", + "deviceName": "The device name exposed to the machine (for example, /dev/sdh or xvdh).", + "ebs": "Parameters used to automatically set up EBS volumes when the machine is launched.", + "noDevice": "Suppresses the specified device included in the block device mapping of the AMI.", + "virtualName": "The virtual device name (ephemeralN). Machine store volumes are numbered starting from 0. A machine type with 2 available machine store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available machine store volumes depends on the machine type. After you connect to the machine, you must mount the volume.\n\nConstraints: For M3 machines, you must specify machine store volumes in the block device mapping for the machine. 
When you launch an M3 machine, we ignore any machine store volumes specified in the block device mapping for the AMI.", +} + +func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string { + return map_BlockDeviceMappingSpec +} + +var map_EBSBlockDeviceSpec = map[string]string{ + "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", + "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.", + "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.", + "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.", + "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.", + "volumeSize": "The size of the volume, in GiB.\n\nConstraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n\nDefault: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.", + "volumeType": "The volume type: gp2, io1, st1, sc1, or standard. Default: standard", +} + +func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string { + return map_EBSBlockDeviceSpec +} + +var map_Filter = map[string]string{ + "": "Filter is a filter used to identify an AWS resource", + "name": "Name of the filter. Filter names are case-sensitive.", + "values": "Values includes one or more filter values. Filter values are case-sensitive.", +} + +func (Filter) SwaggerDoc() map[string]string { + return map_Filter +} + +var map_LoadBalancerReference = map[string]string{ + "": "LoadBalancerReference is a reference to a load balancer on AWS.", +} + +func (LoadBalancerReference) SwaggerDoc() map[string]string { + return map_LoadBalancerReference +} + +var map_Placement = map[string]string{ + "": "Placement indicates where to create the instance in AWS", + "region": "Region is the region to use to create the instance", + "availabilityZone": "AvailabilityZone is the availability zone of the instance", + "tenancy": "Tenancy indicates if instance should run on shared or single-tenant hardware. 
Three options are supported: default, dedicated and host.", +} + +func (Placement) SwaggerDoc() map[string]string { + return map_Placement +} + +var map_SpotMarketOptions = map[string]string{ + "": "SpotMarketOptions defines the options available to a user when configuring Machines to run on Spot instances. Most users should provide an empty struct.", + "maxPrice": "The maximum price the user is willing to pay for their instances Default: On-Demand price", +} + +func (SpotMarketOptions) SwaggerDoc() map[string]string { + return map_SpotMarketOptions +} + +var map_TagSpecification = map[string]string{ + "": "TagSpecification is the name/value pair for a tag", + "name": "Name of the tag", + "value": "Value of the tag", +} + +func (TagSpecification) SwaggerDoc() map[string]string { + return map_TagSpecification +} + +var map_AzureMachineProviderCondition = map[string]string{ + "": "AzureMachineProviderCondition is a condition in an AzureMachineProviderStatus", + "type": "Type is the type of the condition.", + "status": "Status is the status of the condition.", + "lastProbeTime": "LastProbeTime is the last time we probed the condition.", + "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", + "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Message is a human-readable message indicating details about last transition.", +} + +func (AzureMachineProviderCondition) SwaggerDoc() map[string]string { + return map_AzureMachineProviderCondition +} + +var map_AzureMachineProviderSpec = map[string]string{ + "": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with Azure credentials.", + "location": "Location is the region to use to create the instance", + "vmSize": "VMSize is the size of the VM to create.", + "image": "Image is the OS image to use to create the instance.", + "osDisk": "OSDisk represents the parameters for creating the OS disk.", + "sshPublicKey": "SSHPublicKey is the public key to use to SSH to the virtual machine.", + "publicIP": "PublicIP if true a public IP will be used", + "tags": "Tags is a list of tags to apply to the machine.", + "securityGroup": "Network Security Group that needs to be attached to the machine's interface. No security group will be attached if empty.", + "applicationSecurityGroups": "Application Security Groups that need to be attached to the machine's interface. No application security groups will be attached if zero-length.", + "subnet": "Subnet to use for this instance", + "publicLoadBalancer": "PublicLoadBalancer to use for this instance", + "internalLoadBalancer": "InternalLoadBalancerName to use for this instance", + "natRule": "NatRule to set inbound NAT rule of the load balancer", + "managedIdentity": "ManagedIdentity to set managed identity name", + "vnet": "Vnet to set virtual network name", + "zone": "Availability Zone for the virtual machine. 
If nil, the virtual machine should be deployed to no zone", + "networkResourceGroup": "NetworkResourceGroup is the resource group for the virtual machine's network", + "resourceGroup": "ResourceGroup is the resource group for the virtual machine", + "spotVMOptions": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM", + "securityProfile": "SecurityProfile specifies the Security profile settings for a virtual machine.", + "acceleratedNetworking": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.", + "availabilitySet": "AvailabilitySet specifies the availability set to use for this instance. Availability set should be precreated, before using this field.", +} + +func (AzureMachineProviderSpec) SwaggerDoc() map[string]string { + return map_AzureMachineProviderSpec +} + +var map_AzureMachineProviderStatus = map[string]string{ + "": "AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains Azure-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "vmId": "VMID is the ID of the virtual machine created in Azure.", + "vmState": "VMState is the provisioning state of the Azure virtual machine.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.", +} + +func (AzureMachineProviderStatus) SwaggerDoc() map[string]string { + return map_AzureMachineProviderStatus +} + +var map_DiskEncryptionSetParameters = map[string]string{ + "": "DiskEncryptionSetParameters is the disk encryption set properties", + "id": "ID is the disk encryption set ID", +} + +func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string { + return map_DiskEncryptionSetParameters +} + +var map_Image = map[string]string{ + "": "Image is a mirror of azure sdk compute.ImageReference", + "publisher": "Publisher is the name of the organization that created the image", + "offer": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer", + "sku": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter", + "version": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.", + "resourceID": "ResourceID specifies an image to use by ID", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ManagedDiskParameters = map[string]string{ + "": "ManagedDiskParameters is the parameters of a managed disk.", + "storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\" and \"Premium_LRS\".", + "diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties", +} + +func (ManagedDiskParameters) SwaggerDoc() map[string]string { + return map_ManagedDiskParameters +} + +var map_OSDisk = map[string]string{ + "osType": "OSType is the operating system type of the OS disk. 
Possible values include \"Linux\" and \"Windows\".", + "managedDisk": "ManagedDisk specifies the Managed Disk parameters for the OS disk.", + "diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.", +} + +func (OSDisk) SwaggerDoc() map[string]string { + return map_OSDisk +} + +var map_SecurityProfile = map[string]string{ + "": "SecurityProfile specifies the Security profile settings for a virtual machine or virtual machine scale set.", + "encryptionAtHost": "This field indicates whether Host Encryption should be enabled or disabled for a virtual machine or virtual machine scale set. Default is disabled.", +} + +func (SecurityProfile) SwaggerDoc() map[string]string { + return map_SecurityProfile +} + +var map_SpotVMOptions = map[string]string{ + "": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs", + "maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances", +} + +func (SpotVMOptions) SwaggerDoc() map[string]string { + return map_SpotVMOptions +} + +var map_GCPDisk = map[string]string{ + "": "GCPDisk describes disks for GCP.", + "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", + "boot": "Boot indicates if this is a boot disk (default false).", + "sizeGb": "SizeGB is the size of the disk (in GB).", + "type": "Type is the type of the disk (eg: pd-standard).", + "image": "Image is the source image to create this disk.", + "labels": "Labels list of labels to apply to the disk.", + "encryptionKey": "EncryptionKey is the customer-supplied encryption key of the disk.", +} + +func (GCPDisk) SwaggerDoc() map[string]string { + return map_GCPDisk +} + +var map_GCPEncryptionKeyReference = map[string]string{ + "": "GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.", + "kmsKey": "KMSKeyName is the reference KMS key, in the format", + "kmsKeyServiceAccount": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", +} + +func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { + return map_GCPEncryptionKeyReference +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", + "name": "Name is the name of the customer managed encryption key to be used for the disk encryption.", + "keyRing": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.", + "projectID": "ProjectID is the ID of the Project in which the KMS Key Ring exists. 
Defaults to the VM ProjectID if not set.", + "location": "Location is the GCP location in which the Key Ring exists.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + +var map_GCPMachineProviderCondition = map[string]string{ + "": "GCPMachineProviderCondition is a condition in a GCPMachineProviderStatus", + "type": "Type is the type of the condition.", + "status": "Status is the status of the condition.", + "lastProbeTime": "LastProbeTime is the last time we probed the condition.", + "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", + "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Message is a human-readable message indicating details about last transition.", +} + +func (GCPMachineProviderCondition) SwaggerDoc() map[string]string { + return map_GCPMachineProviderCondition +} + +var map_GCPMachineProviderSpec = map[string]string{ + "": "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with GCP credentials.", + "canIPForward": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", + "deletionProtection": "DeletionProtection whether the resource should be protected against deletion.", + "disks": "Disks is a list of disks to be attached to the VM.", + "labels": "Labels list of labels to apply to the VM.", + "gcpMetadata": "Metadata key/value pairs to apply to the VM.", + "networkInterfaces": "NetworkInterfaces is a list of network interfaces to be attached to the VM.", + "serviceAccounts": "ServiceAccounts is a list of GCP service accounts to be used by the VM.", + "tags": "Tags list of tags to apply to the VM.", + "targetPools": "TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool", + "machineType": "MachineType is the machine type to use for the VM.", + "region": "Region is the region in which the GCP machine provider will create the VM.", + "zone": "Zone is the zone in which the GCP machine provider will create the VM.", + "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", + "preemptible": "Preemptible indicates if created instance is preemptible", +} + +func (GCPMachineProviderSpec) SwaggerDoc() map[string]string { + return map_GCPMachineProviderSpec +} + +var map_GCPMachineProviderStatus = map[string]string{ + "": "GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains GCP-specific status information. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the ID of the instance in GCP", + "instanceState": "InstanceState is the provisioning state of the GCP Instance.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", +} + +func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { + return map_GCPMachineProviderStatus +} + +var map_GCPMetadata = map[string]string{ + "": "GCPMetadata describes metadata for GCP.", + "key": "Key is the metadata key.", + "value": "Value is the metadata value.", +} + +func (GCPMetadata) SwaggerDoc() map[string]string { + return map_GCPMetadata +} + +var map_GCPNetworkInterface = map[string]string{ + "": "GCPNetworkInterface describes network interfaces for GCP", + "publicIP": "PublicIP indicates if true a public IP will be used", + "network": "Network is the network name.", + "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", + "subnetwork": "Subnetwork is the subnetwork name.", +} + +func (GCPNetworkInterface) SwaggerDoc() map[string]string { + return map_GCPNetworkInterface +} + +var map_GCPServiceAccount = map[string]string{ + "": "GCPServiceAccount describes service accounts for GCP.", + "email": "Email is the service account email.", + "scopes": "Scopes list of scopes to be assigned to the service account.", +} + +func (GCPServiceAccount) SwaggerDoc() map[string]string { + return map_GCPServiceAccount +} + +var map_LastOperation = map[string]string{ + "": "LastOperation represents the detail of the last performed operation on the MachineObject.", + "description": "Description is the human-readable description of the last operation.", + "lastUpdated": "LastUpdated is the timestamp at which LastOperation API was last-updated.", + "state": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc", + "type": "Type is the type of operation which was last performed. E.g. Create, Delete, Update etc", +} + +func (LastOperation) SwaggerDoc() map[string]string { + return map_LastOperation +} + +var map_Machine = map[string]string{ + "": "Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (Machine) SwaggerDoc() map[string]string { + return map_Machine +} + +var map_MachineList = map[string]string{ + "": "MachineList contains a list of Machine Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineList) SwaggerDoc() map[string]string { + return map_MachineList +} + +var map_MachineSpec = map[string]string{ + "": "MachineSpec defines the desired state of Machine", + "metadata": "ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node.", + "taints": "The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. 
if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints", + "providerSpec": "ProviderSpec details Provider-specific configuration to use during node creation.", + "providerID": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", +} + +func (MachineSpec) SwaggerDoc() map[string]string { + return map_MachineSpec +} + +var map_MachineStatus = map[string]string{ + "": "MachineStatus defines the observed state of Machine", + "nodeRef": "NodeRef will point to the corresponding Node if it exists.", + "lastUpdated": "LastUpdated identifies when this status was last observed.", + "errorReason": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "errorMessage": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "providerStatus": "ProviderStatus details a Provider-specific status. 
It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", + "addresses": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", + "lastOperation": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully.", + "phase": "Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", + "conditions": "Conditions defines the current state of the Machine", +} + +func (MachineStatus) SwaggerDoc() map[string]string { + return map_MachineStatus +} + +var map_MachineHealthCheck = map[string]string{ + "": "MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "spec": "Specification of machine health check policy", + "status": "Most recently observed status of MachineHealthCheck resource", +} + +func (MachineHealthCheck) SwaggerDoc() map[string]string { + return map_MachineHealthCheck +} + +var map_MachineHealthCheckList = map[string]string{ + "": "MachineHealthCheckList contains a list of MachineHealthCheck Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineHealthCheckList) SwaggerDoc() map[string]string { + return map_MachineHealthCheckList +} + +var map_MachineHealthCheckSpec = map[string]string{ + "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", + "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", + "unhealthyConditions": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", + "maxUnhealthy": "Any further remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a positive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.", + "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". 
+ +var map_MachineHealthCheckStatus = map[string]string{ + "": "MachineHealthCheckStatus defines the observed state of MachineHealthCheck", + "expectedMachines": "total number of machines counted by this machine health check", + "currentHealthy": "total number of healthy machines counted by this machine health check", + "remediationsAllowed": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", + "conditions": "Conditions defines the current state of the MachineHealthCheck", +} + +func (MachineHealthCheckStatus) SwaggerDoc() map[string]string { + return map_MachineHealthCheckStatus +} + +var map_UnhealthyCondition = map[string]string{ + "": "UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy.", + "timeout": "Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", +} + +func (UnhealthyCondition) SwaggerDoc() map[string]string { + return map_UnhealthyCondition +} + +var map_MachineSet = map[string]string{ + "": "MachineSet ensures that a specified number of machine replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineSet) SwaggerDoc() map[string]string { + return map_MachineSet +} + +var map_MachineSetList = map[string]string{ + "": "MachineSetList contains a list of MachineSet Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", +} + +func (MachineSetList) SwaggerDoc() map[string]string { + return map_MachineSetList +} + +var map_MachineSetSpec = map[string]string{ + "": "MachineSetSpec defines the desired state of MachineSet", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", + "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready)", + "deletePolicy": "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random\", \"Newest\", \"Oldest\"", + "selector": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the machine that will be created if insufficient replicas are detected.", +} + +func (MachineSetSpec) SwaggerDoc() map[string]string { + return map_MachineSetSpec +} + +var map_MachineSetStatus = map[string]string{ + "": "MachineSetStatus defines the observed state of MachineSet", + "replicas": "Replicas is the most recently observed number of replicas.", + "fullyLabeledReplicas": "The number of replicas that have labels matching the labels of the machine template of the MachineSet.", + "readyReplicas": "The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is \"Ready\".", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed MachineSet.", + "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", +} + +func (MachineSetStatus) SwaggerDoc() map[string]string { + return map_MachineSetStatus +} + +var map_MachineTemplateSpec = map[string]string{ + "": "MachineTemplateSpec describes the data needed to create a Machine from a template", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (MachineTemplateSpec) SwaggerDoc() map[string]string { + return map_MachineTemplateSpec +} + +var map_Condition = map[string]string{ + "": "Condition defines an observation of a Machine API resource operational state.", + "type": "Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + "status": "Status of the condition, one of True, False, Unknown.", + "severity": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", + "lastTransitionTime": "Last time the condition transitioned from one status to another. 
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", + "reason": "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", + "message": "A human readable message indicating details about the transition. This field may be empty.", +} + +func (Condition) SwaggerDoc() map[string]string { + return map_Condition +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. This is a copy of customizable fields from metav1.ObjectMeta.\n\nObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases and read-only fields which end up in the generated CRD validation, having it as a subset simplifies the API and some issues that can impact user experience.\n\nDuring the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, specifically `spec.metadata.creationTimestamp in body must be of type string: \"null\"`. The investigation showed that `controller-tools@v2` behaves differently than its previous version when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.\n\nIn more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` had validation properties, including for `creationTimestamp` (metav1.Time). The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` which breaks validation because the field isn't marked as nullable.\n\nIn future versions, controller-tools@v2 might allow overriding the type and validation for embedded types. When that happens, this hack should be revisited.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. 
An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_ProviderSpec = map[string]string{ + "": "ProviderSpec defines the configuration to use during node creation.", + "value": "Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", +} + +func (ProviderSpec) SwaggerDoc() map[string]string { + return map_ProviderSpec +} + +var map_NetworkDeviceSpec = map[string]string{ + "": "NetworkDeviceSpec defines the network configuration for a virtual machine's network device.", + "networkName": "NetworkName is the name of the vSphere network to which the device will be connected.", +} + +func (NetworkDeviceSpec) SwaggerDoc() map[string]string { + return map_NetworkDeviceSpec +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec defines the virtual machine's network configuration.", + "devices": "Devices defines the virtual machine's network interfaces.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_VSphereMachineProviderCondition = map[string]string{ + "": "VSphereMachineProviderCondition is a condition in a VSphereMachineProviderStatus.", + "type": "Type is the type of the condition.", + "status": "Status is the status of the condition.", + "lastProbeTime": "LastProbeTime is the last time we probed the condition.", + "lastTransitionTime": "LastTransitionTime is the last time the condition transitioned from one status to another.", + "reason": "Reason is a unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Message is a human-readable message indicating details about last transition.", +} + +func (VSphereMachineProviderCondition) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderCondition +} + +var map_VSphereMachineProviderSpec = map[string]string{ + "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. 
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with vSphere credentials.", + "template": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.", + "workspace": "Workspace describes the workspace to use for the machine.", + "network": "Network is the network configuration for this machine's VM.", + "numCPUs": "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "numCoresPerSocket": "NumCoresPerSocket is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "memoryMiB": "MemoryMiB is the size of a virtual machine's memory, in MiB. Defaults to the analogue property value in the template from which this machine is cloned.", + "diskGiB": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned.", + "snapshot": "Snapshot is the name of the snapshot from which the VM was cloned", + "cloneMode": "CloneMode specifies the type of clone operation. The LinkedClone mode is only supported for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to LinkedClone, but fails gracefully to FullClone if the source of the clone operation has no snapshots.", +} + +func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderSpec +} + +var map_VSphereMachineProviderStatus = map[string]string{ + "": "VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains VSphere-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the ID of the instance in VSphere", + "instanceState": "InstanceState is the provisioning state of the VSphere Instance.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "taskRef": "TaskRef is a managed object reference to a Task related to the machine.
This value is set automatically at runtime and should not be set or modified by users.", +} + +func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderStatus +} + +var map_Workspace = map[string]string{ + "": "WorkspaceConfig defines a workspace configuration for the vSphere cloud provider.", + "server": "Server is the IP address or FQDN of the vSphere endpoint.", + "datacenter": "Datacenter is the datacenter in which VMs are created/located.", + "folder": "Folder is the folder in which VMs are created/located.", + "datastore": "Datastore is the datastore in which VMs are created/located.", + "resourcePool": "ResourcePool is the resource pool in which VMs are created/located.", +} + +func (Workspace) SwaggerDoc() map[string]string { + return map_Workspace +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml index 330dff7bf..acb2c4058 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -132,10 +132,11 @@ spec: description: "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc. \n If unset, the default is based on infrastructure.config.openshift.io/cluster - .status.platform: \n AWS: LoadBalancerService (with External - scope) Azure: LoadBalancerService (with External scope) GCP: - \ LoadBalancerService (with External scope) IBMCloud: LoadBalancerService - (with External scope) Libvirt: HostNetwork \n Any other platform + .status.platform: \n AWS: LoadBalancerService (with External + scope) Azure: LoadBalancerService (with External scope) + \ GCP: LoadBalancerService (with External scope) IBMCloud: + \ LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService + (with External scope) Libvirt: HostNetwork \n Any other platform types (including None) default to HostNetwork. \n endpointPublishingStrategy cannot be updated." properties: @@ -328,6 +329,47 @@ spec: required: - type type: object + httpCompression: + description: httpCompression defines a policy for HTTP traffic compression. + By default, there is no HTTP compression. + properties: + mimeTypes: + description: "mimeTypes is a list of MIME types that should have + compression applied. This list can be empty, in which case the + ingress controller does not apply compression. \n Note: Not + all MIME types benefit from compression, but HAProxy will still + use resources to try to compress if instructed to. Generally + speaking, text (html, css, js, etc.) formats benefit from compression, + but formats that are already compressed (image, audio, video, + etc.) benefit little in exchange for the time and cpu spent + on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2" + items: + description: "CompressionMIMEType defines the format of a single + MIME type. E.g. \"text/css; charset=utf-8\", \"text/html\", + \"text/*\", \"image/svg+xml\", \"application/octet-stream\", + \"X-custom/customsub\", etc. 
\n The format should follow the + Content-Type definition in RFC 1341: Content-Type := type + \"/\" subtype *[\";\" parameter] - The type in Content-Type + can be one of: application, audio, image, message, multipart, + text, video, or a custom type preceded by \"X-\" and followed + by a token as defined below. - The token is a string of at + least one character, and not containing white space, control + characters, or any of the characters in the tspecials set. + - The tspecials set contains the characters ()<>@,;:\\\"/[]?.= + - The subtype in Content-Type is also a token. - The optional + parameter/s following the subtype are defined as: token + \"=\" (token / quoted-string) - The quoted-string, as defined + in RFC 822, is surrounded by double quotes and can contain + white space plus any character EXCEPT \\, \", and CR. It + can also contain any single ASCII character as long as it + is escaped by \\." + pattern: ^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ + ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ + ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$ + type: string + type: array + x-kubernetes-list-type: set + type: object httpEmptyRequestsPolicy: default: Respond description: "httpEmptyRequestsPolicy describes how HTTP connections @@ -510,6 +552,15 @@ spec: - local6 - local7 type: string + maxLength: + default: 1024 + description: "maxLength is the maximum length of the + syslog message \n If this field is empty, the maxLength + is set to \"1024\"." + format: int32 + maximum: 4096 + minimum: 480 + type: integer port: description: port is the UDP port number of the syslog endpoint that receives log messages. diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml new file mode 100644 index 000000000..ab52d00b1 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml @@ -0,0 +1,631 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/475 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: networks.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network describes the cluster's desired network configuration. + It is consumed by the cluster-network-operator. \n Compatibility level 1: + Stable within a major release for a minimum of 12 months or 3 minor releases + (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSpec is the top-level network configuration object. + properties: + additionalNetworks: + description: additionalNetworks is a list of extra networks to make + available to pods when multiple networks are enabled. + items: + description: AdditionalNetworkDefinition configures an extra network + that is available but not created by default. Instead, pods must + request them by name. type must be specified, along with exactly + one "Config" that matches the type. + properties: + name: + description: name is the name of the network. This will be populated + in the resulting CRD This must be unique. + type: string + namespace: + description: namespace is the namespace of the network. This + will be populated in the resulting CRD If not given the network + will be created in the default namespace. + type: string + rawCNIConfig: + description: rawCNIConfig is the raw CNI configuration json + to create in the NetworkAttachmentDefinition CRD + type: string + simpleMacvlanConfig: + description: SimpleMacvlanConfig configures the macvlan interface + in case of type:NetworkTypeSimpleMacvlan + properties: + ipamConfig: + description: IPAMConfig configures IPAM module will be used + for IP Address Management (IPAM). + properties: + staticIPAMConfig: + description: StaticIPAMConfig configures the static + IP address in case of type:IPAMTypeStatic + properties: + addresses: + description: Addresses configures IP address for + the interface + items: + description: StaticIPAMAddresses provides IP address + and Gateway for static IPAM addresses + properties: + address: + description: Address is the IP address in + CIDR format + type: string + gateway: + description: Gateway is IP inside of subnet + to designate as the gateway + type: string + type: object + type: array + dns: + description: DNS configures DNS for the interface + properties: + domain: + description: Domain configures the domainname + the local domain used for short hostname lookups + type: string + nameservers: + description: Nameservers points DNS servers + for IP lookup + items: + type: string + type: array + search: + description: Search configures priority ordered + search domains for short hostname lookups + items: + type: string + type: array + type: object + routes: + description: Routes configures IP routes for the + interface + items: + description: StaticIPAMRoutes provides Destination/Gateway + pairs for static IPAM routes + properties: + destination: + description: Destination points the IP route + destination + type: string + gateway: + description: Gateway is the route's next-hop + IP address If unset, a default gateway is + assumed (as determined by the CNI plugin). + type: string + type: object + type: array + type: object + type: + description: Type is the type of IPAM module will be + used for IP Address Management(IPAM). The supported + values are IPAMTypeDHCP, IPAMTypeStatic + type: string + type: object + master: + description: master is the host interface to create the + macvlan interface from. If not specified, it will be default + route interface + type: string + mode: + description: 'mode is the macvlan mode: bridge, private, + vepa, passthru. The default is bridge' + type: string + mtu: + description: mtu is the mtu to use for the macvlan interface. + if unset, host's kernel will select the value. 
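A sketch of how the simpleMacvlanConfig fields described here might appear in an additionalNetworks entry of the cluster-scoped Network operator object (conventionally named cluster); the network name, namespace and master interface are assumptions:

```yaml
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  additionalNetworks:
    - name: macvlan-net              # becomes the NetworkAttachmentDefinition name (assumed)
      namespace: example-project     # assumed namespace
      type: SimpleMacvlan
      simpleMacvlanConfig:
        master: ens4                 # assumed host interface; defaults to the default-route interface
        mode: bridge
        mtu: 1500
        ipamConfig:
          type: DHCP
```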
+ format: int32 + minimum: 0 + type: integer + type: object + type: + description: type is the type of network The supported values + are NetworkTypeRaw, NetworkTypeSimpleMacvlan + type: string + type: object + type: array + clusterNetwork: + description: clusterNetwork is the IP address pool to use for pod + IPs. Some network providers, e.g. OpenShift SDN, support multiple + ClusterNetworks. Others only support one. This is equivalent to + the cluster-cidr. + items: + description: ClusterNetworkEntry is a subnet from which to allocate + PodIPs. A network of size HostPrefix (in CIDR notation) will be + allocated when nodes join the cluster. If the HostPrefix field + is not used by the plugin, it can be left unset. Not all network + providers support multiple ClusterNetworks + properties: + cidr: + type: string + hostPrefix: + format: int32 + minimum: 0 + type: integer + type: object + type: array + defaultNetwork: + description: defaultNetwork is the "default" network that all pods + will receive + properties: + kuryrConfig: + description: KuryrConfig configures the kuryr plugin + properties: + controllerProbesPort: + description: The port kuryr-controller will listen for readiness + and liveness requests. + format: int32 + minimum: 0 + type: integer + daemonProbesPort: + description: The port kuryr-daemon will listen for readiness + and liveness requests. + format: int32 + minimum: 0 + type: integer + enablePortPoolsPrepopulation: + description: enablePortPoolsPrepopulation when true will make + Kuryr prepopulate each newly created port pool with a minimum + number of ports. Kuryr uses Neutron port pooling to fight + the fact that it takes a significant amount of time to create + one. Instead of creating it when pod is being deployed, + Kuryr keeps a number of ports ready to be attached to pods. + By default port prepopulation is disabled. + type: boolean + mtu: + description: mtu is the MTU that Kuryr should use when creating + pod networks in Neutron. The value has to be lower or equal + to the MTU of the nodes network and Neutron has to allow + creation of tenant networks with such MTU. If unset Pod + networks will be created with the same MTU as the nodes + network has. + format: int32 + minimum: 0 + type: integer + openStackServiceNetwork: + description: openStackServiceNetwork contains the CIDR of + network from which to allocate IPs for OpenStack Octavia's + Amphora VMs. Please note that with Amphora driver Octavia + uses two IPs from that network for each loadbalancer - one + given by OpenShift and second for VRRP connections. As the + first one is managed by OpenShift's and second by Neutron's + IPAMs, those need to come from different pools. Therefore + `openStackServiceNetwork` needs to be at least twice the + size of `serviceNetwork`, and whole `serviceNetwork` must + be overlapping with `openStackServiceNetwork`. cluster-network-operator + will then make sure VRRP IPs are taken from the ranges inside + `openStackServiceNetwork` that are not overlapping with + `serviceNetwork`, effectivly preventing conflicts. If not + set cluster-network-operator will use `serviceNetwork` expanded + by decrementing the prefix size by 1. + type: string + poolBatchPorts: + description: poolBatchPorts sets a number of ports that should + be created in a single batch request to extend the port + pool. The default is 3. For more information about port + pools see enablePortPoolsPrepopulation setting. 
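As a sketch of the Kuryr port-pool tuning described here (all numbers are arbitrary examples, not defaults taken from this changeset):

```yaml
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: Kuryr
    kuryrConfig:
      enablePortPoolsPrepopulation: true
      poolMinPorts: 5      # keep at least 5 free ports per pool
      poolBatchPorts: 10   # grow the pool 10 ports at a time
      poolMaxPorts: 0      # 0 leaves the pool unbounded (the default)
```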
+ minimum: 0 + type: integer + poolMaxPorts: + description: poolMaxPorts sets a maximum number of free ports + that are being kept in a port pool. If the number of ports + exceeds this setting, free ports will get deleted. Setting + 0 will disable this upper bound, effectively preventing + pools from shrinking and this is the default value. For + more information about port pools see enablePortPoolsPrepopulation + setting. + minimum: 0 + type: integer + poolMinPorts: + description: poolMinPorts sets a minimum number of free ports + that should be kept in a port pool. If the number of ports + is lower than this setting, new ports will get created and + added to pool. The default is 1. For more information about + port pools see enablePortPoolsPrepopulation setting. + minimum: 1 + type: integer + type: object + openshiftSDNConfig: + description: openShiftSDNConfig configures the openshift-sdn plugin + properties: + enableUnidling: + description: enableUnidling controls whether or not the service + proxy will support idling and unidling of services. By default, + unidling is enabled. + type: boolean + mode: + description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + type: string + mtu: + description: mtu is the mtu to use for the tunnel interface. + Defaults to 1450 if unset. This must be 50 bytes smaller + than the machine's uplink. + format: int32 + minimum: 0 + type: integer + useExternalOpenvswitch: + description: 'useExternalOpenvswitch used to control whether + the operator would deploy an OVS DaemonSet itself or expect + someone else to start OVS. As of 4.6, OVS is always run + as a system service, and this flag is ignored. DEPRECATED: + non-functional as of 4.6' + type: boolean + vxlanPort: + description: vxlanPort is the port to use for all vxlan packets. + The default is 4789. + format: int32 + minimum: 0 + type: integer + type: object + ovnKubernetesConfig: + description: oVNKubernetesConfig configures the ovn-kubernetes + plugin. This is currently not implemented. + properties: + genevePort: + description: geneve port is the UDP port to be used by geneve + encapulation. Default is 6081 + format: int32 + minimum: 1 + type: integer + hybridOverlayConfig: + description: HybridOverlayConfig configures an additional + overlay network for peers that are not using OVN. + properties: + hybridClusterNetwork: + description: HybridClusterNetwork defines a network space + given to nodes on an additional overlay network. + items: + description: ClusterNetworkEntry is a subnet from which + to allocate PodIPs. A network of size HostPrefix (in + CIDR notation) will be allocated when nodes join the + cluster. If the HostPrefix field is not used by the + plugin, it can be left unset. Not all network providers + support multiple ClusterNetworks + properties: + cidr: + type: string + hostPrefix: + format: int32 + minimum: 0 + type: integer + type: object + type: array + hybridOverlayVXLANPort: + description: HybridOverlayVXLANPort defines the VXLAN + port number to be used by the additional overlay network. + Default is 4789 + format: int32 + type: integer + type: object + ipsecConfig: + description: ipsecConfig enables and configures IPsec for + pods on the pod network within the cluster. + type: object + mtu: + description: mtu is the MTU to use for the tunnel interface. + This must be 100 bytes smaller than the uplink mtu. 
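A sketch of the ovnKubernetesConfig fields above, assuming a 1500-byte uplink MTU:

```yaml
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      genevePort: 6081   # default Geneve encapsulation port
      mtu: 1400          # 100 bytes below the assumed 1500-byte uplink MTU
      ipsecConfig: {}    # presence of the (empty) object enables IPsec for pod traffic
```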
Default + is 1400 + format: int32 + minimum: 0 + type: integer + policyAuditConfig: + default: {} + description: policyAuditConfig is the configuration for network + policy audit events. If unset, reported defaults are used. + properties: + destination: + default: "null" + description: 'destination is the location for policy log + messages. Regardless of this config, persistent logs + will always be dumped to the host at /var/log/ovn/ however + Additionally syslog output may be configured as follows. + Valid values are: - "libc" -> to use the libc syslog() + function of the host node''s journdald process - "udp:host:port" + -> for sending syslog over UDP - "unix:file" -> for + using the UNIX domain socket directly - "null" -> to + discard all messages logged to syslog The default is + "null"' + type: string + maxFileSize: + default: 50 + description: maxFilesSize is the max size an ACL_audit + log file is allowed to reach before rotation occurs + Units are in MB and the Default is 50MB + format: int32 + minimum: 1 + type: integer + rateLimit: + default: 20 + description: rateLimit is the approximate maximum number + of messages to generate per-second per-node. If unset + the default of 20 msg/sec is used. + format: int32 + minimum: 1 + type: integer + syslogFacility: + default: local0 + description: syslogFacility the RFC5424 facility for generated + messages, e.g. "kern". Default is "local0" + type: string + type: object + type: object + type: + description: type is the type of network All NetworkTypes are + supported except for NetworkTypeRaw + type: string + type: object + deployKubeProxy: + description: deployKubeProxy specifies whether or not a standalone + kube-proxy should be deployed by the operator. Some network providers + include kube-proxy or similar functionality. If unset, the plugin + will attempt to select the correct value, which is false when OpenShift + SDN and ovn-kubernetes are used and true otherwise. + type: boolean + disableMultiNetwork: + description: disableMultiNetwork specifies whether or not multiple + pod network support should be disabled. If unset, this property + defaults to 'false' and multiple network support is enabled. + type: boolean + disableNetworkDiagnostics: + default: false + description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck + CRs from a test pod to every node, apiserver and LB should be disabled + or not. If unset, this property defaults to 'false' and network + diagnostics is enabled. Setting this to 'true' would reduce the + additional load of the pods performing the checks. + type: boolean + exportNetworkFlows: + description: exportNetworkFlows enables and configures the export + of network flow metadata from the pod network by using protocols + NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes + plugin. If unset, flows will not be exported to any collector. + properties: + ipfix: + description: ipfix defines IPFIX configuration. + properties: + collectors: + description: ipfixCollectors is list of strings formatted + as ip:port with a maximum of ten items + items: + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + maxItems: 10 + minItems: 1 + type: array + type: object + netFlow: + description: netFlow defines the NetFlow configuration. 
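A sketch of the exportNetworkFlows stanza using the ip:port collector format described above (the collector address is an example):

```yaml
apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  exportNetworkFlows:
    netFlow:
      collectors:
        - "192.0.2.10:2056"   # example NetFlow collector, ip:port
```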
+ properties: + collectors: + description: netFlow defines the NetFlow collectors that will + consume the flow data exported from OVS. It is a list of + strings formatted as ip:port with a maximum of ten items + items: + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + maxItems: 10 + minItems: 1 + type: array + type: object + sFlow: + description: sFlow defines the SFlow configuration. + properties: + collectors: + description: sFlowCollectors is list of strings formatted + as ip:port with a maximum of ten items + items: + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + maxItems: 10 + minItems: 1 + type: array + type: object + type: object + kubeProxyConfig: + description: kubeProxyConfig lets us configure desired proxy configuration. + If not specified, sensible defaults will be chosen by OpenShift + directly. Not consumed by all network providers - currently only + openshift-sdn. + properties: + bindAddress: + description: The address to "bind" on Defaults to 0.0.0.0 + type: string + iptablesSyncPeriod: + description: 'An internal kube-proxy parameter. In older releases + of OCP, this sometimes needed to be adjusted in large clusters + for performance reasons, but this is no longer necessary, and + there is no reason to change this from the default value. Default: + 30s' + type: string + proxyArguments: + additionalProperties: + description: ProxyArgumentList is a list of arguments to pass + to the kubeproxy process + items: + type: string + type: array + description: Any additional arguments to pass to the kubeproxy + process + type: object + type: object + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + migration: + description: migration enables and configures the cluster network + migration. Setting this to the target network type to allow changing + the default network. If unset, the operation of changing cluster + default network plugin will be rejected. + properties: + networkType: + description: networkType is the target type of network migration + The supported values are OpenShiftSDN, OVNKubernetes + type: string + type: object + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. 
It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + serviceNetwork: + description: serviceNetwork is the ip address pool to use for Service + IPs Currently, all existing network providers only support a single + value here, but this is an array to allow for growth. + items: + type: string + type: array + unsupportedConfigOverrides: + description: 'unsupportedConfigOverrides holds a sparse config that + will override any previously set options. It only needs to be the + fields to override it will end up overlaying in the following order: + 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + useMultiNetworkPolicy: + description: useMultiNetworkPolicy enables a controller which allows + for MultiNetworkPolicy objects to be used on additional networks + as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy + objects, but NetworkPolicy objects only apply to the primary interface. + With MultiNetworkPolicy, you can control the traffic that a pod + can receive over the secondary interfaces. If unset, this property + defaults to 'false' and MultiNetworkPolicy objects are ignored. + If 'disableMultiNetwork' is 'true' then the value of this field + is ignored. + type: boolean + type: object + status: + description: NetworkStatus is detailed operator status, which is distilled + up to the Network clusteroperator object. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + type: object + served: true + storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml-patch new file mode 100644 index 000000000..094f526ec --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml-patch @@ -0,0 +1,3 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/defaultNetwork/properties/ovnKubernetesConfig/properties/policyAuditConfig/default + value: {} diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml deleted file mode 100644 index c0dcce7df..000000000 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01_crd.yaml +++ /dev/null @@ -1,443 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - api-approved.openshift.io: https://github.com/openshift/api/pull/475 - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - name: networks.operator.openshift.io -spec: - group: operator.openshift.io - names: - kind: Network - listKind: NetworkList - plural: networks - singular: network - scope: Cluster - versions: - - name: v1 - schema: - openAPIV3Schema: - description: "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NetworkSpec is the top-level network configuration object. - type: object - properties: - additionalNetworks: - description: additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled. - type: array - items: - description: AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one "Config" that matches the type. - type: object - properties: - name: - description: name is the name of the network. This will be populated in the resulting CRD This must be unique. - type: string - namespace: - description: namespace is the namespace of the network. This will be populated in the resulting CRD If not given the network will be created in the default namespace. - type: string - rawCNIConfig: - description: rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD - type: string - simpleMacvlanConfig: - description: SimpleMacvlanConfig configures the macvlan interface in case of type:NetworkTypeSimpleMacvlan - type: object - properties: - ipamConfig: - description: IPAMConfig configures IPAM module will be used for IP Address Management (IPAM). - type: object - properties: - staticIPAMConfig: - description: StaticIPAMConfig configures the static IP address in case of type:IPAMTypeStatic - type: object - properties: - addresses: - description: Addresses configures IP address for the interface - type: array - items: - description: StaticIPAMAddresses provides IP address and Gateway for static IPAM addresses - type: object - properties: - address: - description: Address is the IP address in CIDR format - type: string - gateway: - description: Gateway is IP inside of subnet to designate as the gateway - type: string - dns: - description: DNS configures DNS for the interface - type: object - properties: - domain: - description: Domain configures the domainname the local domain used for short hostname lookups - type: string - nameservers: - description: Nameservers points DNS servers for IP lookup - type: array - items: - type: string - search: - description: Search configures priority ordered search domains for short hostname lookups - type: array - items: - type: string - routes: - description: Routes configures IP routes for the interface - type: array - items: - description: StaticIPAMRoutes provides Destination/Gateway pairs for static IPAM routes - type: object - properties: - destination: - description: Destination points the IP route destination - type: string - gateway: - description: Gateway is the route's next-hop IP address If unset, a default gateway is assumed (as determined by the CNI plugin). - type: string - type: - description: Type is the type of IPAM module will be used for IP Address Management(IPAM). The supported values are IPAMTypeDHCP, IPAMTypeStatic - type: string - master: - description: master is the host interface to create the macvlan interface from. If not specified, it will be default route interface - type: string - mode: - description: 'mode is the macvlan mode: bridge, private, vepa, passthru. The default is bridge' - type: string - mtu: - description: mtu is the mtu to use for the macvlan interface. if unset, host's kernel will select the value. 
- type: integer - format: int32 - minimum: 0 - type: - description: type is the type of network The supported values are NetworkTypeRaw, NetworkTypeSimpleMacvlan - type: string - clusterNetwork: - description: clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr. - type: array - items: - description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks - type: object - properties: - cidr: - type: string - hostPrefix: - type: integer - format: int32 - minimum: 0 - defaultNetwork: - description: defaultNetwork is the "default" network that all pods will receive - type: object - properties: - kuryrConfig: - description: KuryrConfig configures the kuryr plugin - type: object - properties: - controllerProbesPort: - description: The port kuryr-controller will listen for readiness and liveness requests. - type: integer - format: int32 - minimum: 0 - daemonProbesPort: - description: The port kuryr-daemon will listen for readiness and liveness requests. - type: integer - format: int32 - minimum: 0 - enablePortPoolsPrepopulation: - description: enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. Instead of creating it when pod is being deployed, Kuryr keeps a number of ports ready to be attached to pods. By default port prepopulation is disabled. - type: boolean - mtu: - description: mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. - type: integer - format: int32 - minimum: 0 - openStackServiceNetwork: - description: openStackServiceNetwork contains the CIDR of network from which to allocate IPs for OpenStack Octavia's Amphora VMs. Please note that with Amphora driver Octavia uses two IPs from that network for each loadbalancer - one given by OpenShift and second for VRRP connections. As the first one is managed by OpenShift's and second by Neutron's IPAMs, those need to come from different pools. Therefore `openStackServiceNetwork` needs to be at least twice the size of `serviceNetwork`, and whole `serviceNetwork` must be overlapping with `openStackServiceNetwork`. cluster-network-operator will then make sure VRRP IPs are taken from the ranges inside `openStackServiceNetwork` that are not overlapping with `serviceNetwork`, effectivly preventing conflicts. If not set cluster-network-operator will use `serviceNetwork` expanded by decrementing the prefix size by 1. - type: string - poolBatchPorts: - description: poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting. - type: integer - minimum: 0 - poolMaxPorts: - description: poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. 
If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting. - type: integer - minimum: 0 - poolMinPorts: - description: poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting. - type: integer - minimum: 1 - openshiftSDNConfig: - description: openShiftSDNConfig configures the openshift-sdn plugin - type: object - properties: - enableUnidling: - description: enableUnidling controls whether or not the service proxy will support idling and unidling of services. By default, unidling is enabled. - type: boolean - mode: - description: mode is one of "Multitenant", "Subnet", or "NetworkPolicy" - type: string - mtu: - description: mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink. - type: integer - format: int32 - minimum: 0 - useExternalOpenvswitch: - description: 'useExternalOpenvswitch used to control whether the operator would deploy an OVS DaemonSet itself or expect someone else to start OVS. As of 4.6, OVS is always run as a system service, and this flag is ignored. DEPRECATED: non-functional as of 4.6' - type: boolean - vxlanPort: - description: vxlanPort is the port to use for all vxlan packets. The default is 4789. - type: integer - format: int32 - minimum: 0 - ovnKubernetesConfig: - description: oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently not implemented. - type: object - properties: - genevePort: - description: geneve port is the UDP port to be used by geneve encapulation. Default is 6081 - type: integer - format: int32 - minimum: 1 - hybridOverlayConfig: - description: HybridOverlayConfig configures an additional overlay network for peers that are not using OVN. - type: object - properties: - hybridClusterNetwork: - description: HybridClusterNetwork defines a network space given to nodes on an additional overlay network. - type: array - items: - description: ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. If the HostPrefix field is not used by the plugin, it can be left unset. Not all network providers support multiple ClusterNetworks - type: object - properties: - cidr: - type: string - hostPrefix: - type: integer - format: int32 - minimum: 0 - hybridOverlayVXLANPort: - description: HybridOverlayVXLANPort defines the VXLAN port number to be used by the additional overlay network. Default is 4789 - type: integer - format: int32 - ipsecConfig: - description: ipsecConfig enables and configures IPsec for pods on the pod network within the cluster. - type: object - mtu: - description: mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400 - type: integer - format: int32 - minimum: 0 - policyAuditConfig: - description: policyAuditConfig is the configuration for network policy audit events. If unset, reported defaults are used. - type: object - properties: - destination: - description: 'destination is the location for policy log messages. 
Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - "libc" -> to use the libc syslog() function of the host node''s journdald process - "udp:host:port" -> for sending syslog over UDP - "unix:file" -> for using the UNIX domain socket directly - "null" -> to discard all messages logged to syslog The default is "null"' - type: string - default: "null" - maxFileSize: - description: maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB - type: integer - format: int32 - default: 50 - minimum: 1 - rateLimit: - description: rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used. - type: integer - format: int32 - default: 20 - minimum: 1 - syslogFacility: - description: syslogFacility the RFC5424 facility for generated messages, e.g. "kern". Default is "local0" - type: string - default: local0 - type: - description: type is the type of network All NetworkTypes are supported except for NetworkTypeRaw - type: string - deployKubeProxy: - description: deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise. - type: boolean - disableMultiNetwork: - description: disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled. - type: boolean - disableNetworkDiagnostics: - description: disableNetworkDiagnostics specifies whether or not PodNetworkConnectivityCheck CRs from a test pod to every node, apiserver and LB should be disabled or not. If unset, this property defaults to 'false' and network diagnostics is enabled. Setting this to 'true' would reduce the additional load of the pods performing the checks. - type: boolean - default: false - exportNetworkFlows: - description: exportNetworkFlows enables and configures the export of network flow metadata from the pod network by using protocols NetFlow, SFlow or IPFIX. Currently only supported on OVN-Kubernetes plugin. If unset, flows will not be exported to any collector. - type: object - properties: - ipfix: - description: ipfix defines IPFIX configuration. - type: object - properties: - collectors: - description: ipfixCollectors is list of strings formatted as ip:port with a maximum of ten items - type: array - maxItems: 10 - minItems: 1 - items: - type: string - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - netFlow: - description: netFlow defines the NetFlow configuration. - type: object - properties: - collectors: - description: netFlow defines the NetFlow collectors that will consume the flow data exported from OVS. 
It is a list of strings formatted as ip:port with a maximum of ten items - type: array - maxItems: 10 - minItems: 1 - items: - type: string - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - sFlow: - description: sFlow defines the SFlow configuration. - type: object - properties: - collectors: - description: sFlowCollectors is list of strings formatted as ip:port with a maximum of ten items - type: array - maxItems: 10 - minItems: 1 - items: - type: string - pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]):([1-9][0-9]{0,3}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ - kubeProxyConfig: - description: kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn. - type: object - properties: - bindAddress: - description: The address to "bind" on Defaults to 0.0.0.0 - type: string - iptablesSyncPeriod: - description: 'An internal kube-proxy parameter. In older releases of OCP, this sometimes needed to be adjusted in large clusters for performance reasons, but this is no longer necessary, and there is no reason to change this from the default value. Default: 30s' - type: string - proxyArguments: - description: Any additional arguments to pass to the kubeproxy process - type: object - additionalProperties: - description: ProxyArgumentList is a list of arguments to pass to the kubeproxy process - type: array - items: - type: string - logLevel: - description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." - type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - managementState: - description: managementState indicates whether and how the operator should manage the component - type: string - pattern: ^(Managed|Unmanaged|Force|Removed)$ - migration: - description: migration enables and configures the cluster network migration. Setting this to the target network type to allow changing the default network. If unset, the operation of changing cluster default network plugin will be rejected. - type: object - properties: - networkType: - description: networkType is the target type of network migration The supported values are OpenShiftSDN, OVNKubernetes - type: string - observedConfig: - description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - operatorLogLevel: - description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
- type: string - default: Normal - enum: - - "" - - Normal - - Debug - - Trace - - TraceAll - serviceNetwork: - description: serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth. - type: array - items: - type: string - unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' - type: object - nullable: true - x-kubernetes-preserve-unknown-fields: true - useMultiNetworkPolicy: - description: useMultiNetworkPolicy enables a controller which allows for MultiNetworkPolicy objects to be used on additional networks as created by Multus CNI. MultiNetworkPolicy are similar to NetworkPolicy objects, but NetworkPolicy objects only apply to the primary interface. With MultiNetworkPolicy, you can control the traffic that a pod can receive over the secondary interfaces. If unset, this property defaults to 'false' and MultiNetworkPolicy objects are ignored. If 'disableMultiNetwork' is 'true' then the value of this field is ignored. - type: boolean - status: - description: NetworkStatus is detailed operator status, which is distilled up to the Network clusteroperator object. - type: object - properties: - conditions: - description: conditions is a list of conditions and their status - type: array - items: - description: OperatorCondition is just the standard condition fields. - type: object - properties: - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - generations: - description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - type: array - items: - description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
- type: object - properties: - group: - description: group is the group of the thing you're tracking - type: string - hash: - description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps - type: string - lastGeneration: - description: lastGeneration is the last generation of the workload controller involved - type: integer - format: int64 - name: - description: name is the name of the thing you're tracking - type: string - namespace: - description: namespace is where the thing you're tracking is - type: string - resource: - description: resource is the resource type of the thing you're tracking - type: string - observedGeneration: - description: observedGeneration is the last generation change you've dealt with - type: integer - format: int64 - readyReplicas: - description: readyReplicas indicates how many replicas are ready and at the desired state - type: integer - format: int32 - version: - description: version is the level this availability applies to - type: string - served: true - storage: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml index 84ff00d91..054f908cc 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00-custom-resource-definition.yaml @@ -38,6 +38,14 @@ spec: description: spec is the specification of the desired behavior of the DNS. type: object properties: + logLevel: + description: 'logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses. Setting logLevel: Trace will produce extremely verbose logs. Valid values are: "Normal", "Debug", "Trace". Defaults to "Normal".' + type: string + default: Normal + enum: + - Normal + - Debug + - Trace managementState: description: managementState indicates whether the DNS operator should manage cluster DNS type: string @@ -74,6 +82,14 @@ spec: value: description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. type: string + operatorLogLevel: + description: 'operatorLogLevel controls the logging level of the DNS Operator. Valid values are: "Normal", "Debug", "Trace". Defaults to "Normal". setting operatorLogLevel: Trace will produce extremely verbose logs.' + type: string + default: Normal + enum: + - Normal + - Debug + - Trace servers: description: "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server. \n For example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\". \n If this field is nil, no servers are created." type: array @@ -85,8 +101,16 @@ spec: description: forwardPlugin defines a schema for configuring CoreDNS to proxy DNS messages to upstream resolvers. 
type: object properties: + policy: + description: "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified: \n * \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. \n The default value is \"Random\"" + type: string + default: Random + enum: + - Random + - RoundRobin + - Sequential upstreams: - description: "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Upstreams are randomized when more than 1 upstream is specified. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53. \n A maximum of 15 upstreams is allowed per ForwardPlugin." + description: "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53. \n A maximum of 15 upstreams is allowed per ForwardPlugin." type: array maxItems: 15 items: diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml index 78451b2fa..cd2fe7388 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -41,13 +41,16 @@ spec: - ebs.csi.aws.com - efs.csi.aws.com - disk.csi.azure.com + - file.csi.azure.com - pd.csi.storage.gke.io - cinder.csi.openstack.org - csi.vsphere.vmware.com - manila.csi.openstack.org - csi.ovirt.org - csi.kubevirt.io - - csi.shared-resources.openshift.io + - csi.sharedresource.openshift.io + - diskplugin.csi.alibabacloud.com + - vpc.block.csi.ibm.io type: string type: object spec: diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch index 508903a8c..44a72978c 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch @@ -7,10 +7,13 @@ - ebs.csi.aws.com - efs.csi.aws.com - disk.csi.azure.com + - file.csi.azure.com - pd.csi.storage.gke.io - cinder.csi.openstack.org - csi.vsphere.vmware.com - manila.csi.openstack.org - csi.ovirt.org - csi.kubevirt.io - - csi.shared-resources.openshift.io + - csi.sharedresource.openshift.io + - diskplugin.csi.alibabacloud.com + - vpc.block.csi.ibm.io diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index 1460d15e3..827c76dd8 100644 --- 
a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -45,13 +45,16 @@ const ( AWSEBSCSIDriver CSIDriverName = "ebs.csi.aws.com" AWSEFSCSIDriver CSIDriverName = "efs.csi.aws.com" AzureDiskCSIDriver CSIDriverName = "disk.csi.azure.com" + AzureFileCSIDriver CSIDriverName = "file.csi.azure.com" GCPPDCSIDriver CSIDriverName = "pd.csi.storage.gke.io" CinderCSIDriver CSIDriverName = "cinder.csi.openstack.org" VSphereCSIDriver CSIDriverName = "csi.vsphere.vmware.com" ManilaCSIDriver CSIDriverName = "manila.csi.openstack.org" OvirtCSIDriver CSIDriverName = "csi.ovirt.org" KubevirtCSIDriver CSIDriverName = "csi.kubevirt.io" - SharedResourcesCSIDriver CSIDriverName = "csi.shared-resources.openshift.io" + SharedResourcesCSIDriver CSIDriverName = "csi.sharedresource.openshift.io" + AlibabaDiskCSIDriver CSIDriverName = "diskplugin.csi.alibabacloud.com" + IBMVPCBlockCSIDriver CSIDriverName = "vpc.block.csi.ibm.io" ) // ClusterCSIDriverSpec is the desired behavior of CSI driver operator diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index 4e6c7eea7..d73fed96e 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -70,8 +70,42 @@ type DNSSpec struct { // DNS // +optional ManagementState ManagementState `json:"managementState,omitempty"` + + // operatorLogLevel controls the logging level of the DNS Operator. + // Valid values are: "Normal", "Debug", "Trace". + // Defaults to "Normal". + // setting operatorLogLevel: Trace will produce extremely verbose logs. + // +optional + // +kubebuilder:default=Normal + OperatorLogLevel DNSLogLevel `json:"operatorLogLevel,omitempty"` + + // logLevel describes the desired logging verbosity for CoreDNS. + // Any one of the following values may be specified: + // * Normal logs errors from upstream resolvers. + // * Debug logs errors, NXDOMAIN responses, and NODATA responses. + // * Trace logs errors and all responses. + // Setting logLevel: Trace will produce extremely verbose logs. + // Valid values are: "Normal", "Debug", "Trace". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel DNSLogLevel `json:"logLevel,omitempty"` } +// +kubebuilder:validation:Enum:=Normal;Debug;Trace +type DNSLogLevel string + +var ( + // Normal is the default. Normal, working log information, everything is fine, but helpful notices for auditing or common operations. In kube, this is probably glog=2. + DNSLogLevelNormal DNSLogLevel = "Normal" + + // Debug is used when something went wrong. Even common operations may be logged, and less helpful but more quantity of notices. In kube, this is probably glog=4. + DNSLogLevelDebug DNSLogLevel = "Debug" + + // Trace is used when something went really badly and even more verbose logs are needed. Logging every function call as part of a common operation, to tracing execution of a query. In kube, this is probably glog=6. + DNSLogLevelTrace DNSLogLevel = "Trace" +) + // Server defines the schema for a server that runs per instance of CoreDNS. type Server struct { // name is required and specifies a unique name for the server. Name must comply @@ -86,19 +120,46 @@ type Server struct { ForwardPlugin ForwardPlugin `json:"forwardPlugin"` } +// ForwardingPolicy is the policy to use when forwarding DNS requests. 
+// +kubebuilder:validation:Enum=Random;RoundRobin;Sequential +type ForwardingPolicy string + +const ( + // RandomForwardingPolicy picks a random upstream server for each query. + RandomForwardingPolicy ForwardingPolicy = "Random" + + // RoundRobinForwardingPolicy picks upstream servers in a round-robin order, moving to the next server for each new query. + RoundRobinForwardingPolicy ForwardingPolicy = "RoundRobin" + + // SequentialForwardingPolicy tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + SequentialForwardingPolicy ForwardingPolicy = "Sequential" +) + // ForwardPlugin defines a schema for configuring the CoreDNS forward plugin. type ForwardPlugin struct { // upstreams is a list of resolvers to forward name queries for subdomains of Zones. - // Upstreams are randomized when more than 1 upstream is specified. Each instance of - // CoreDNS performs health checking of Upstreams. When a healthy upstream returns an - // error during the exchange, another resolver is tried from Upstreams. Each upstream - // is represented by an IP address or IP:port if the upstream listens on a port other - // than 53. + // Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream + // returns an error during the exchange, another resolver is tried from Upstreams. The + // Upstreams are selected in the order specified in Policy. Each upstream is represented + // by an IP address or IP:port if the upstream listens on a port other than 53. // // A maximum of 15 upstreams is allowed per ForwardPlugin. // // +kubebuilder:validation:MaxItems=15 Upstreams []string `json:"upstreams"` + + // policy is used to determine the order in which upstream servers are selected for querying. + // Any one of the following values may be specified: + // + // * "Random" picks a random upstream server for each query. + // * "RoundRobin" picks upstream servers in a round-robin order, moving to the next server for each new query. + // * "Sequential" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query. + // + // The default value is "Random" + // + // +optional + // +kubebuilder:default:="Random" + Policy ForwardingPolicy `json:"policy,omitempty"` } // DNSNodePlacement describes the node scheduling configuration for DNS pods. diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 44c02d6ec..a50b2b78d 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -87,11 +87,12 @@ type IngressControllerSpec struct { // If unset, the default is based on // infrastructure.config.openshift.io/cluster .status.platform: // - // AWS: LoadBalancerService (with External scope) - // Azure: LoadBalancerService (with External scope) - // GCP: LoadBalancerService (with External scope) - // IBMCloud: LoadBalancerService (with External scope) - // Libvirt: HostNetwork + // AWS: LoadBalancerService (with External scope) + // Azure: LoadBalancerService (with External scope) + // GCP: LoadBalancerService (with External scope) + // IBMCloud: LoadBalancerService (with External scope) + // AlibabaCloud: LoadBalancerService (with External scope) + // Libvirt: HostNetwork // // Any other platform types (including None) default to HostNetwork. 
// @@ -234,8 +235,57 @@ type IngressControllerSpec struct { // +nullable // +kubebuilder:pruning:PreserveUnknownFields UnsupportedConfigOverrides runtime.RawExtension `json:"unsupportedConfigOverrides"` + + // httpCompression defines a policy for HTTP traffic compression. + // By default, there is no HTTP compression. + // + // +optional + HTTPCompression HTTPCompressionPolicy `json:"httpCompression,omitempty"` +} + +// httpCompressionPolicy turns on compression for the specified MIME types. +// +// This field is optional, and its absence implies that compression should not be enabled +// globally in HAProxy. +// +// If httpCompressionPolicy exists, compression should be enabled only for the specified +// MIME types. +type HTTPCompressionPolicy struct { + // mimeTypes is a list of MIME types that should have compression applied. + // This list can be empty, in which case the ingress controller does not apply compression. + // + // Note: Not all MIME types benefit from compression, but HAProxy will still use resources + // to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) + // formats benefit from compression, but formats that are already compressed (image, + // audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing + // again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2 + // + // +listType=set + MimeTypes []CompressionMIMEType `json:"mimeTypes,omitempty"` } +// CompressionMIMEType defines the format of a single MIME type. +// E.g. "text/css; charset=utf-8", "text/html", "text/*", "image/svg+xml", +// "application/octet-stream", "X-custom/customsub", etc. +// +// The format should follow the Content-Type definition in RFC 1341: +// Content-Type := type "/" subtype *[";" parameter] +// - The type in Content-Type can be one of: +// application, audio, image, message, multipart, text, video, or a custom +// type preceded by "X-" and followed by a token as defined below. +// - The token is a string of at least one character, and not containing white +// space, control characters, or any of the characters in the tspecials set. +// - The tspecials set contains the characters ()<>@,;:\"/[]?.= +// - The subtype in Content-Type is also a token. +// - The optional parameter/s following the subtype are defined as: +// token "=" (token / quoted-string) +// - The quoted-string, as defined in RFC 822, is surrounded by double quotes +// and can contain white space plus any character EXCEPT \, ", and CR. +// It can also contain any single ASCII character as long as it is escaped by \. +// +// +kubebuilder:validation:Pattern=`^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$` +type CompressionMIMEType string + // NodePlacement describes node scheduling configuration for an ingress // controller. 
type NodePlacement struct { @@ -356,13 +406,14 @@ type ProviderLoadBalancerParameters struct { type LoadBalancerProviderType string const ( - AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" - AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" - GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" - OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" - VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" - IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" - BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" + AWSLoadBalancerProvider LoadBalancerProviderType = "AWS" + AzureLoadBalancerProvider LoadBalancerProviderType = "Azure" + GCPLoadBalancerProvider LoadBalancerProviderType = "GCP" + OpenStackLoadBalancerProvider LoadBalancerProviderType = "OpenStack" + VSphereLoadBalancerProvider LoadBalancerProviderType = "VSphere" + IBMLoadBalancerProvider LoadBalancerProviderType = "IBM" + BareMetalLoadBalancerProvider LoadBalancerProviderType = "BareMetal" + AlibabaCloudLoadBalancerProvider LoadBalancerProviderType = "AlibabaCloud" ) // AWSLoadBalancerParameters provides configuration settings that are @@ -771,6 +822,17 @@ type SyslogLoggingDestinationParameters struct { // +kubebuilder:validation:Enum=kern;user;mail;daemon;auth;syslog;lpr;news;uucp;cron;auth2;ftp;ntp;audit;alert;cron2;local0;local1;local2;local3;local4;local5;local6;local7 // +optional Facility string `json:"facility,omitempty"` + + // maxLength is the maximum length of the syslog message + // + // If this field is empty, the maxLength is set to "1024". + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Maximum=4096 + // +kubebuilder:validation:Minimum=480 + // +kubebuilder:default=1024 + // +optional + MaxLength uint32 `json:"maxLength,omitempty"` } // ContainerLoggingDestinationParameters describes parameters for the Container diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index a7454e97c..c6938bfbb 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -1278,6 +1278,27 @@ func (in *GenerationStatus) DeepCopy() *GenerationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPCompressionPolicy) DeepCopyInto(out *HTTPCompressionPolicy) { + *out = *in + if in.MimeTypes != nil { + in, out := &in.MimeTypes, &out.MimeTypes + *out = make([]CompressionMIMEType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPCompressionPolicy. +func (in *HTTPCompressionPolicy) DeepCopy() *HTTPCompressionPolicy { + if in == nil { + return nil + } + out := new(HTTPCompressionPolicy) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HostNetworkStrategy) DeepCopyInto(out *HostNetworkStrategy) { *out = *in @@ -1630,6 +1651,7 @@ func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { } in.TuningOptions.DeepCopyInto(&out.TuningOptions) in.UnsupportedConfigOverrides.DeepCopyInto(&out.UnsupportedConfigOverrides) + in.HTTPCompression.DeepCopyInto(&out.HTTPCompression) return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index bb4110389..95b3fd700 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -422,10 +422,12 @@ func (DNSNodePlacement) SwaggerDoc() map[string]string { } var map_DNSSpec = map[string]string{ - "": "DNSSpec is the specification of the desired behavior of the DNS.", - "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", - "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", - "managementState": "managementState indicates whether the DNS operator should manage cluster DNS", + "": "DNSSpec is the specification of the desired behavior of the DNS.", + "servers": "servers is a list of DNS resolvers that provide name query delegation for one or more subdomains outside the scope of the cluster domain. If servers consists of more than one Server, longest suffix match will be used to determine the Server.\n\nFor example, if there are two Servers, one for \"foo.com\" and another for \"a.foo.com\", and the name query is for \"www.a.foo.com\", it will be routed to the Server with Zone \"a.foo.com\".\n\nIf this field is nil, no servers are created.", + "nodePlacement": "nodePlacement provides explicit control over the scheduling of DNS pods.\n\nGenerally, it is useful to run a DNS pod on every node so that DNS queries are always handled by a local DNS pod instead of going over the network to a DNS pod on another node. However, security policies may require restricting the placement of DNS pods to specific nodes. For example, if a security policy prohibits pods on arbitrary nodes from communicating with the API, a node selector can be specified to restrict DNS pods to nodes that are permitted to communicate with the API. 
Conversely, if running DNS pods on nodes with a particular taint is desired, a toleration can be specified for that taint.\n\nIf unset, defaults are used. See nodePlacement for more details.", + "managementState": "managementState indicates whether the DNS operator should manage cluster DNS", + "operatorLogLevel": "operatorLogLevel controls the logging level of the DNS Operator. Valid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\". setting operatorLogLevel: Trace will produce extremely verbose logs.", + "logLevel": "logLevel describes the desired logging verbosity for CoreDNS. Any one of the following values may be specified: * Normal logs errors from upstream resolvers. * Debug logs errors, NXDOMAIN responses, and NODATA responses. * Trace logs errors and all responses.\n Setting logLevel: Trace will produce extremely verbose logs.\nValid values are: \"Normal\", \"Debug\", \"Trace\". Defaults to \"Normal\".", } func (DNSSpec) SwaggerDoc() map[string]string { @@ -445,7 +447,8 @@ func (DNSStatus) SwaggerDoc() map[string]string { var map_ForwardPlugin = map[string]string{ "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", - "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Upstreams are randomized when more than 1 upstream is specified. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", } func (ForwardPlugin) SwaggerDoc() map[string]string { @@ -561,6 +564,15 @@ func (GCPLoadBalancerParameters) SwaggerDoc() map[string]string { return map_GCPLoadBalancerParameters } +var map_HTTPCompressionPolicy = map[string]string{ + "": "httpCompressionPolicy turns on compression for the specified MIME types.\n\nThis field is optional, and its absence implies that compression should not be enabled globally in HAProxy.\n\nIf httpCompressionPolicy exists, compression should be enabled only for the specified MIME types.", + "mimeTypes": "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression.\n\nNote: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. 
Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) benefit little in exchange for the time and cpu spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2", +} + +func (HTTPCompressionPolicy) SwaggerDoc() map[string]string { + return map_HTTPCompressionPolicy +} + var map_HostNetworkStrategy = map[string]string{ "": "HostNetworkStrategy holds parameters for the HostNetwork endpoint publishing strategy.", "protocol": "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol.\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nThe following values are valid for this field:\n\n* The empty string. * \"TCP\". * \"PROXY\".\n\nThe empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change.", @@ -663,7 +675,7 @@ var map_IngressControllerSpec = map[string]string{ "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", "httpErrorCodePages": "httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format \"error-page-.http\", where is an HTTP error code. For example, \"error-page-503.http\" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages.", "replicas": "replicas is the desired number of ingress controller replicas. 
If unset, defaults to 2.", - "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService (with External scope)\n Azure: LoadBalancerService (with External scope)\n GCP: LoadBalancerService (with External scope)\n IBMCloud: LoadBalancerService (with External scope)\n AlibabaCloud: LoadBalancerService (with External scope)\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nIf a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", @@ -676,6 +688,7 @@ var map_IngressControllerSpec = map[string]string{ "httpEmptyRequestsPolicy": "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. 
The default value is \"Respond\".\n\nTypically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts.", "tuningOptions": "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details.\n\nSetting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations.", "unsupportedConfigOverrides": "unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported.", + "httpCompression": "httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression.", } func (IngressControllerSpec) SwaggerDoc() map[string]string { @@ -784,10 +797,11 @@ func (RouteAdmissionPolicy) SwaggerDoc() map[string]string { } var map_SyslogLoggingDestinationParameters = map[string]string{ - "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", - "address": "address is the IP address of the syslog endpoint that receives log messages.", - "port": "port is the UDP port number of the syslog endpoint that receives log messages.", - "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", + "": "SyslogLoggingDestinationParameters describes parameters for the Syslog logging destination type.", + "address": "address is the IP address of the syslog endpoint that receives log messages.", + "port": "port is the UDP port number of the syslog endpoint that receives log messages.", + "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", + "maxLength": "maxLength is the maximum length of the syslog message\n\nIf this field is empty, the maxLength is set to \"1024\".", } func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index 0f2182d2f..707e5fc06 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -19,6 +19,7 @@ type ConfigV1Interface interface { DNSesGetter FeatureGatesGetter ImagesGetter + ImageContentPoliciesGetter InfrastructuresGetter IngressesGetter NetworksGetter @@ -70,6 +71,10 @@ func (c *ConfigV1Client) Images() ImageInterface { return newImages(c) } +func (c *ConfigV1Client) ImageContentPolicies() ImageContentPolicyInterface { + return newImageContentPolicies(c) +} + func (c *ConfigV1Client) Infrastructures() InfrastructureInterface { return newInfrastructures(c) } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go index 
d743f4679..40d153d7c 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go @@ -48,6 +48,10 @@ func (c *FakeConfigV1) Images() v1.ImageInterface { return &FakeImages{c} } +func (c *FakeConfigV1) ImageContentPolicies() v1.ImageContentPolicyInterface { + return &FakeImageContentPolicies{c} +} + func (c *FakeConfigV1) Infrastructures() v1.InfrastructureInterface { return &FakeInfrastructures{c} } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go new file mode 100644 index 000000000..1cb92e2ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_imagecontentpolicy.go @@ -0,0 +1,106 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + configv1 "github.com/openshift/api/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeImageContentPolicies implements ImageContentPolicyInterface +type FakeImageContentPolicies struct { + Fake *FakeConfigV1 +} + +var imagecontentpoliciesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "imagecontentpolicies"} + +var imagecontentpoliciesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "ImageContentPolicy"} + +// Get takes name of the imageContentPolicy, and returns the corresponding imageContentPolicy object, and an error if there is any. +func (c *FakeImageContentPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(imagecontentpoliciesResource, name), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// List takes label and field selectors, and returns the list of ImageContentPolicies that match those selectors. +func (c *FakeImageContentPolicies) List(ctx context.Context, opts v1.ListOptions) (result *configv1.ImageContentPolicyList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(imagecontentpoliciesResource, imagecontentpoliciesKind, opts), &configv1.ImageContentPolicyList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &configv1.ImageContentPolicyList{ListMeta: obj.(*configv1.ImageContentPolicyList).ListMeta} + for _, item := range obj.(*configv1.ImageContentPolicyList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested imageContentPolicies. +func (c *FakeImageContentPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(imagecontentpoliciesResource, opts)) +} + +// Create takes the representation of a imageContentPolicy and creates it. 
Returns the server's representation of the imageContentPolicy, and an error, if there is any. +func (c *FakeImageContentPolicies) Create(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts v1.CreateOptions) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(imagecontentpoliciesResource, imageContentPolicy), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// Update takes the representation of a imageContentPolicy and updates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any. +func (c *FakeImageContentPolicies) Update(ctx context.Context, imageContentPolicy *configv1.ImageContentPolicy, opts v1.UpdateOptions) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(imagecontentpoliciesResource, imageContentPolicy), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} + +// Delete takes name of the imageContentPolicy and deletes it. Returns an error if one occurs. +func (c *FakeImageContentPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(imagecontentpoliciesResource, name), &configv1.ImageContentPolicy{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeImageContentPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(imagecontentpoliciesResource, listOpts) + + _, err := c.Fake.Invokes(action, &configv1.ImageContentPolicyList{}) + return err +} + +// Patch applies the patch and returns the patched imageContentPolicy. +func (c *FakeImageContentPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *configv1.ImageContentPolicy, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(imagecontentpoliciesResource, name, pt, data, subresources...), &configv1.ImageContentPolicy{}) + if obj == nil { + return nil, err + } + return obj.(*configv1.ImageContentPolicy), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go index 50a4ec7f8..646801584 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -20,6 +20,8 @@ type FeatureGateExpansion interface{} type ImageExpansion interface{} +type ImageContentPolicyExpansion interface{} + type InfrastructureExpansion interface{} type IngressExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go new file mode 100644 index 000000000..17c441e5b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/imagecontentpolicy.go @@ -0,0 +1,152 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ImageContentPoliciesGetter has a method to return a ImageContentPolicyInterface. +// A group's client should implement this interface. +type ImageContentPoliciesGetter interface { + ImageContentPolicies() ImageContentPolicyInterface +} + +// ImageContentPolicyInterface has methods to work with ImageContentPolicy resources. +type ImageContentPolicyInterface interface { + Create(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.CreateOptions) (*v1.ImageContentPolicy, error) + Update(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.UpdateOptions) (*v1.ImageContentPolicy, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageContentPolicy, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageContentPolicyList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageContentPolicy, err error) + ImageContentPolicyExpansion +} + +// imageContentPolicies implements ImageContentPolicyInterface +type imageContentPolicies struct { + client rest.Interface +} + +// newImageContentPolicies returns a ImageContentPolicies +func newImageContentPolicies(c *ConfigV1Client) *imageContentPolicies { + return &imageContentPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the imageContentPolicy, and returns the corresponding imageContentPolicy object, and an error if there is any. +func (c *imageContentPolicies) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageContentPolicy, err error) { + result = &v1.ImageContentPolicy{} + err = c.client.Get(). + Resource("imagecontentpolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ImageContentPolicies that match those selectors. +func (c *imageContentPolicies) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageContentPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ImageContentPolicyList{} + err = c.client.Get(). + Resource("imagecontentpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested imageContentPolicies. +func (c *imageContentPolicies) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("imagecontentpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch(ctx) +} + +// Create takes the representation of a imageContentPolicy and creates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any. +func (c *imageContentPolicies) Create(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.CreateOptions) (result *v1.ImageContentPolicy, err error) { + result = &v1.ImageContentPolicy{} + err = c.client.Post(). + Resource("imagecontentpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageContentPolicy). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a imageContentPolicy and updates it. Returns the server's representation of the imageContentPolicy, and an error, if there is any. +func (c *imageContentPolicies) Update(ctx context.Context, imageContentPolicy *v1.ImageContentPolicy, opts metav1.UpdateOptions) (result *v1.ImageContentPolicy, err error) { + result = &v1.ImageContentPolicy{} + err = c.client.Put(). + Resource("imagecontentpolicies"). + Name(imageContentPolicy.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageContentPolicy). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageContentPolicy and deletes it. Returns an error if one occurs. +func (c *imageContentPolicies) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("imagecontentpolicies"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *imageContentPolicies) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("imagecontentpolicies"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched imageContentPolicy. +func (c *imageContentPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageContentPolicy, err error) { + result = &v1.ImageContentPolicy{} + err = c.client.Patch(pt). + Resource("imagecontentpolicies"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..14db57a58 --- /dev/null +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. 
+package scheme diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/scheme/register.go similarity index 65% rename from vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme/register.go rename to vendor/github.com/openshift/client-go/machine/clientset/versioned/scheme/register.go index 6d21458f1..91805c2bb 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme/register.go +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/scheme/register.go @@ -1,25 +1,9 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ // Code generated by client-gen. DO NOT EDIT. package scheme import ( - machinev1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/doc.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/doc.go new file mode 100644 index 000000000..897c0995f --- /dev/null +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/generated_expansion.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/generated_expansion.go new file mode 100644 index 000000000..d45fc4afe --- /dev/null +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/generated_expansion.go @@ -0,0 +1,9 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type MachineExpansion interface{} + +type MachineHealthCheckExpansion interface{} + +type MachineSetExpansion interface{} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machine.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machine.go similarity index 88% rename from vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machine.go rename to vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machine.go index e0efc668c..f67e4ea67 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machine.go +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machine.go @@ -1,19 +1,3 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ // Code generated by client-gen. DO NOT EDIT. package v1beta1 @@ -22,8 +6,8 @@ import ( "context" "time" - v1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme" + v1beta1 "github.com/openshift/api/machine/v1beta1" + scheme "github.com/openshift/client-go/machine/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machine_client.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machine_client.go similarity index 73% rename from vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machine_client.go rename to vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machine_client.go index 7eb75589c..8509b2589 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machine_client.go +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machine_client.go @@ -1,26 +1,10 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ // Code generated by client-gen. DO NOT EDIT. 
package v1beta1 import ( - v1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" - "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme" + v1beta1 "github.com/openshift/api/machine/v1beta1" + "github.com/openshift/client-go/machine/clientset/versioned/scheme" rest "k8s.io/client-go/rest" ) diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machinehealthcheck.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machinehealthcheck.go similarity index 89% rename from vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machinehealthcheck.go rename to vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machinehealthcheck.go index 78ee76ec7..299802157 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machinehealthcheck.go +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machinehealthcheck.go @@ -1,19 +1,3 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ // Code generated by client-gen. DO NOT EDIT. package v1beta1 @@ -22,8 +6,8 @@ import ( "context" "time" - v1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme" + v1beta1 "github.com/openshift/api/machine/v1beta1" + scheme "github.com/openshift/client-go/machine/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machineset.go b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machineset.go similarity index 88% rename from vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machineset.go rename to vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machineset.go index 531fcec96..0720dea13 100644 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/machineset.go +++ b/vendor/github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1/machineset.go @@ -1,19 +1,3 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ // Code generated by client-gen. DO NOT EDIT. package v1beta1 @@ -22,8 +6,8 @@ import ( "context" "time" - v1beta1 "github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1" - scheme "github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme" + v1beta1 "github.com/openshift/api/machine/v1beta1" + scheme "github.com/openshift/client-go/machine/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/LICENSE b/vendor/github.com/openshift/cluster-api-provider-gcp/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
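
Context for the cluster-api-provider-gcp removal that follows: the vendored gcpprovider v1beta1 types and their RawExtension helpers (ProviderSpecFromRawExtension and friends) are deleted below. Where equivalent decoding is still needed, the pinned github.com/openshift/api module is expected to carry the GCP provider types under machine/v1beta1; the sketch below is illustrative only under that assumption, and decodeGCPProviderSpec is a hypothetical helper, not part of this change.

package gcpspec

import (
	"encoding/json"
	"fmt"

	mapi "github.com/openshift/api/machine/v1beta1"
)

// decodeGCPProviderSpec unmarshals a Machine's providerSpec value into the
// GCP provider spec type. ProviderSpec.Value is a *runtime.RawExtension whose
// Raw field holds JSON, so encoding/json is sufficient here; the deleted
// register.go helpers used sigs.k8s.io/yaml for the same round-trip.
func decodeGCPProviderSpec(machine *mapi.Machine) (*mapi.GCPMachineProviderSpec, error) {
	if machine.Spec.ProviderSpec.Value == nil {
		return nil, fmt.Errorf("machine %s has no providerSpec value", machine.Name)
	}
	spec := &mapi.GCPMachineProviderSpec{}
	if err := json.Unmarshal(machine.Spec.ProviderSpec.Value.Raw, spec); err != nil {
		return nil, fmt.Errorf("error unmarshalling providerSpec: %v", err)
	}
	return spec, nil
}

A caller would pass a Machine obtained from the relocated typed machine clientset vendored earlier in this diff; the decoding shape mirrors the deleted ProviderSpecFromRawExtension, only the type's import path changes.
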
diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/doc.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/doc.go deleted file mode 100644 index eea429419..000000000 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package v1beta1 contains API Schema definitions for the gcpprovider v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider -// +k8s:defaulter-gen=TypeMeta -// +groupName=gcpprovider.machine.openshift.io -package v1beta1 diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go deleted file mode 100644 index 04aa60f95..000000000 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderconfig_types.go +++ /dev/null @@ -1,106 +0,0 @@ -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field -// for an GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. -// +k8s:openapi-gen=true -type GCPMachineProviderSpec struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // UserDataSecret contains a local reference to a secret that contains the - // UserData to apply to the instance - UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` - - // CredentialsSecret is a reference to the secret with GCP credentials. - CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` - - CanIPForward bool `json:"canIPForward"` - DeletionProtection bool `json:"deletionProtection"` - Disks []*GCPDisk `json:"disks,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"` - NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"` - ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"` - Tags []string `json:"tags,omitempty"` - TargetPools []string `json:"targetPools,omitempty"` - MachineType string `json:"machineType"` - Region string `json:"region"` - Zone string `json:"zone"` - ProjectID string `json:"projectID,omitempty"` - - // Preemptible indicates if created instance is preemptible - Preemptible bool `json:"preemptible,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -func init() { - SchemeBuilder.Register(&GCPMachineProviderSpec{}) -} - -// GCPDisk describes disks for GCP. -type GCPDisk struct { - AutoDelete bool `json:"autoDelete"` - Boot bool `json:"boot"` - SizeGb int64 `json:"sizeGb"` - Type string `json:"type"` - Image string `json:"image"` - Labels map[string]string `json:"labels"` - EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"` -} - -// GCPMetadata describes metadata for GCP. 
-type GCPMetadata struct { - Key string `json:"key"` - Value *string `json:"value"` -} - -// GCPNetworkInterface describes network interfaces for GCP -type GCPNetworkInterface struct { - PublicIP bool `json:"publicIP,omitempty"` - Network string `json:"network,omitempty"` - ProjectID string `json:"projectID,omitempty"` - Subnetwork string `json:"subnetwork,omitempty"` -} - -// GCPServiceAccount describes service accounts for GCP. -type GCPServiceAccount struct { - Email string `json:"email"` - Scopes []string `json:"scopes"` -} - -// GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption. -type GCPEncryptionKeyReference struct { - KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` - - // KMSKeyServiceAccount is the service account being used for the - // encryption request for the given KMS key. If absent, the Compute - // Engine default service account is used. - // See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account - // for details on the default service account. - KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"` -} - -// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key -type GCPKMSKeyReference struct { - // Name is the name of the customer managed encryption key to be used for the disk encryption. - Name string `json:"name"` - - // KeyRing is the name of the KMS Key Ring which the KMS Key belongs to. - KeyRing string `json:"keyRing"` - - // ProjectID is the ID of the Project in which the KMS Key Ring exists. - // Defaults to the VM ProjectID if not set. - ProjectID string `json:"projectID,omitempty"` - - // Location is the GCP location in which the Key Ring exists. - Location string `json:"location"` -} diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderstatus_types.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderstatus_types.go deleted file mode 100644 index 144b6cc02..000000000 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/gcpmachineproviderstatus_types.go +++ /dev/null @@ -1,65 +0,0 @@ -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. -// It contains GCP-specific status information. -// +k8s:openapi-gen=true -type GCPMachineProviderStatus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // InstanceID is the ID of the instance in GCP - // +optional - InstanceID *string `json:"instanceId,omitempty"` - - // InstanceState is the provisioning state of the GCP Instance. - // +optional - InstanceState *string `json:"instanceState,omitempty"` - - // Conditions is a set of conditions associated with the Machine to indicate - // errors or other status - Conditions []GCPMachineProviderCondition `json:"conditions,omitempty"` -} - -// GCPMachineProviderConditionType is a valid value for GCPMachineProviderCondition.Type -type GCPMachineProviderConditionType string - -// Valid conditions for an GCP machine instance -const ( - // MachineCreated indicates whether the machine has been created or not. If not, - // it should include a reason and message for the failure. 
- MachineCreated GCPMachineProviderConditionType = "MachineCreated" -) - -// GCPMachineProviderCondition is a condition in a GCPMachineProviderStatus -type GCPMachineProviderCondition struct { - // Type is the type of the condition. - Type GCPMachineProviderConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -func init() { - SchemeBuilder.Register(&GCPMachineProviderStatus{}) -} diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go deleted file mode 100644 index 3400d917a..000000000 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/register.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package v1beta1 contains API Schema definitions for the gcpprovider v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider -// +k8s:defaulter-gen=TypeMeta -// +groupName=gcpprovider.machine.openshift.io -package v1beta1 - -import ( - "encoding/json" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/scheme" - "sigs.k8s.io/yaml" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "gcpprovider.openshift.io", Version: "v1beta1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) - -// RawExtensionFromProviderSpec marshals the machine provider spec. 
-func RawExtensionFromProviderSpec(spec *GCPMachineProviderSpec) (*runtime.RawExtension, error) { - if spec == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - if rawBytes, err = json.Marshal(spec); err != nil { - return nil, fmt.Errorf("error marshalling providerSpec: %v", err) - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// RawExtensionFromProviderStatus marshals the provider status -func RawExtensionFromProviderStatus(status *GCPMachineProviderStatus) (*runtime.RawExtension, error) { - if status == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - if rawBytes, err = json.Marshal(status); err != nil { - return nil, fmt.Errorf("error marshalling providerStatus: %v", err) - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// ProviderSpecFromRawExtension unmarshals the JSON-encoded spec -func ProviderSpecFromRawExtension(rawExtension *runtime.RawExtension) (*GCPMachineProviderSpec, error) { - if rawExtension == nil { - return &GCPMachineProviderSpec{}, nil - } - - spec := new(GCPMachineProviderSpec) - if err := yaml.Unmarshal(rawExtension.Raw, &spec); err != nil { - return nil, fmt.Errorf("error unmarshalling providerSpec: %v", err) - } - - klog.V(5).Infof("Got provider spec from raw extension: %+v", spec) - return spec, nil -} - -// ProviderStatusFromRawExtension unmarshals a raw extension into a GCPMachineProviderStatus type -func ProviderStatusFromRawExtension(rawExtension *runtime.RawExtension) (*GCPMachineProviderStatus, error) { - if rawExtension == nil { - return &GCPMachineProviderStatus{}, nil - } - - providerStatus := new(GCPMachineProviderStatus) - if err := yaml.Unmarshal(rawExtension.Raw, providerStatus); err != nil { - return nil, fmt.Errorf("error unmarshalling providerStatus: %v", err) - } - - klog.V(5).Infof("Got provider Status from raw extension: %+v", providerStatus) - return providerStatus, nil -} diff --git a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 9f3d6d443..000000000 --- a/vendor/github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,294 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.EncryptionKey != nil { - in, out := &in.EncryptionKey, &out.EncryptionKey - *out = new(GCPEncryptionKeyReference) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. -func (in *GCPDisk) DeepCopy() *GCPDisk { - if in == nil { - return nil - } - out := new(GCPDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPEncryptionKeyReference) DeepCopyInto(out *GCPEncryptionKeyReference) { - *out = *in - if in.KMSKey != nil { - in, out := &in.KMSKey, &out.KMSKey - *out = new(GCPKMSKeyReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEncryptionKeyReference. -func (in *GCPEncryptionKeyReference) DeepCopy() *GCPEncryptionKeyReference { - if in == nil { - return nil - } - out := new(GCPEncryptionKeyReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. -func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { - if in == nil { - return nil - } - out := new(GCPKMSKeyReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineProviderCondition) DeepCopyInto(out *GCPMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderCondition. -func (in *GCPMachineProviderCondition) DeepCopy() *GCPMachineProviderCondition { - if in == nil { - return nil - } - out := new(GCPMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.UserDataSecret != nil { - in, out := &in.UserDataSecret, &out.UserDataSecret - *out = new(v1.LocalObjectReference) - **out = **in - } - if in.CredentialsSecret != nil { - in, out := &in.CredentialsSecret, &out.CredentialsSecret - *out = new(v1.LocalObjectReference) - **out = **in - } - if in.Disks != nil { - in, out := &in.Disks, &out.Disks - *out = make([]*GCPDisk, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPDisk) - (*in).DeepCopyInto(*out) - } - } - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Metadata != nil { - in, out := &in.Metadata, &out.Metadata - *out = make([]*GCPMetadata, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPMetadata) - (*in).DeepCopyInto(*out) - } - } - } - if in.NetworkInterfaces != nil { - in, out := &in.NetworkInterfaces, &out.NetworkInterfaces - *out = make([]*GCPNetworkInterface, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(GCPNetworkInterface) - **out = **in - } - } - } - if in.ServiceAccounts != nil { - in, out := &in.ServiceAccounts, &out.ServiceAccounts - *out = make([]GCPServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.TargetPools != nil { - in, out := &in.TargetPools, &out.TargetPools - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderSpec. -func (in *GCPMachineProviderSpec) DeepCopy() *GCPMachineProviderSpec { - if in == nil { - return nil - } - out := new(GCPMachineProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *GCPMachineProviderSpec) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMachineProviderStatus) DeepCopyInto(out *GCPMachineProviderStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.InstanceID != nil { - in, out := &in.InstanceID, &out.InstanceID - *out = new(string) - **out = **in - } - if in.InstanceState != nil { - in, out := &in.InstanceState, &out.InstanceState - *out = new(string) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]GCPMachineProviderCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderStatus. -func (in *GCPMachineProviderStatus) DeepCopy() *GCPMachineProviderStatus { - if in == nil { - return nil - } - out := new(GCPMachineProviderStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *GCPMachineProviderStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata. -func (in *GCPMetadata) DeepCopy() *GCPMetadata { - if in == nil { - return nil - } - out := new(GCPMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface. -func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface { - if in == nil { - return nil - } - out := new(GCPNetworkInterface) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) { - *out = *in - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount. -func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount { - if in == nil { - return nil - } - out := new(GCPServiceAccount) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/machine-api-operator/LICENSE b/vendor/github.com/openshift/machine-api-operator/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/openshift/machine-api-operator/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go deleted file mode 100644 index 1b710aafa..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/register.go +++ /dev/null @@ -1,8 +0,0 @@ -// Generate deepcopy for apis -//go:generate go run ../../../vendor/sigs.k8s.io/controller-tools/cmd/controller-gen paths=./... 
object:headerFile=../../../hack/boilerplate.go.txt,year=2019 -// Ensure generated code is goimports compliant -//go:generate goimports -w ./v1beta1/zz_generated.deepcopy.go - -package machine - -const GroupName = "machine.openshift.io" diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go deleted file mode 100644 index b49141ac6..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/common_types.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// ProviderSpec defines the configuration to use during node creation. -type ProviderSpec struct { - - // No more than one of the following may be specified. - - // Value is an inlined, serialized representation of the resource - // configuration. It is recommended that providers maintain their own - // versioned API types that should be serialized/deserialized from this - // field, akin to component config. - // +optional - // +kubebuilder:validation:XPreserveUnknownFields - Value *runtime.RawExtension `json:"value,omitempty"` -} - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. This is a copy of customizable fields from metav1.ObjectMeta. -// -// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, -// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases -// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies -// the API and some issues that can impact user experience. -// -// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) -// for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, -// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`. -// The investigation showed that `controller-tools@v2` behaves differently than its previous version -// when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package. -// -// In more details, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` -// had validation properties, including for `creationTimestamp` (metav1.Time). -// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` -// which breaks validation because the field isn't marked as nullable. -// -// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded -// types. When that happens, this hack should be revisited. -type ObjectMeta struct { - // Name must be unique within a namespace. 
Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - // +optional - Name string `json:"name,omitempty"` - - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency - // +optional - GenerateName string `json:"generateName,omitempty"` - - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces - // +optional - Namespace string `json:"namespace,omitempty"` - - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://kubernetes.io/docs/user-guide/labels - // +optional - Labels map[string]string `json:"labels,omitempty"` - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations - // +optional - Annotations map[string]string `json:"annotations,omitempty"` - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. 
- // +optional - // +patchMergeKey=uid - // +patchStrategy=merge - OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"` -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go deleted file mode 100644 index 9b579de3f..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_consts.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// Conditions and condition Reasons for the MachineHealthCheck object -const ( - // RemediationAllowedCondition is set on MachineHealthChecks to show the status of whether the MachineHealthCheck is - // allowed to remediate any Machines or whether it is blocked from remediating any further. - RemediationAllowedCondition ConditionType = "RemediationAllowed" - - // TooManyUnhealthy is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked - // from making any further remediations. - TooManyUnhealthyReason = "TooManyUnhealthy" - - // ExternalRemediationTemplateAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation. - // ExternalRemediationTemplateAvailable is set to false if external remediation template is not found. - ExternalRemediationTemplateAvailable ConditionType = "ExternalRemediationTemplateAvailable" - - // ExternalRemediationTemplateNotFound is the reason used when a machine health check fails to find external remediation template. - ExternalRemediationTemplateNotFound = "ExternalRemediationTemplateNotFound" - - // ExternalRemediationRequestAvailable is set on machinehealthchecks when MachineHealthCheck controller uses external remediation. - // ExternalRemediationRequestAvailable is set to false if creating external remediation request fails. - ExternalRemediationRequestAvailable ConditionType = "ExternalRemediationRequestAvailable" - - // ExternalRemediationRequestCreationFailed is the reason used when a machine health check fails to create external remediation request. - ExternalRemediationRequestCreationFailed = "ExternalRemediationRequestCreationFailed" -) - -const ( - // InstanceExistsCondition is set on the Machine to show whether a virtual machine has been created by the cloud provider. - InstanceExistsCondition ConditionType = "InstanceExists" - - // ErrorCheckingProviderReason is the reason used when the exist operation fails. - // This would normally be because we cannot contact the provider. - ErrorCheckingProviderReason = "ErrorCheckingProvider" - - // InstanceMissingReason is the reason used when the machine was provisioned, but the instance has gone missing. - InstanceMissingReason = "InstanceMissing" - - // InstanceNotCreatedReason is the reason used when the machine has not yet been provisioned.
- InstanceNotCreatedReason = "InstanceNotCreated" -) diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_types.go deleted file mode 100644 index d95da5377..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/condition_types.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ConditionSeverity expresses the severity of a Condition Type failing. -type ConditionSeverity string - -const ( - // ConditionSeverityError specifies that a condition with `Status=False` is an error. - ConditionSeverityError ConditionSeverity = "Error" - - // ConditionSeverityWarning specifies that a condition with `Status=False` is a warning. - ConditionSeverityWarning ConditionSeverity = "Warning" - - // ConditionSeverityInfo specifies that a condition with `Status=False` is informative. - ConditionSeverityInfo ConditionSeverity = "Info" - - // ConditionSeverityNone should apply only to conditions with `Status=True`. - ConditionSeverityNone ConditionSeverity = "" -) - -// ConditionType is a valid value for Condition.Type. -type ConditionType string - -// Condition defines an observation of a Machine API resource operational state. -type Condition struct { - // Type of condition in CamelCase or in foo.example.com/CamelCase. - // Many .condition.type values are consistent across resources like Available, but because arbitrary conditions - // can be useful (see .node.status.conditions), the ability to deconflict is important. - // +required - Type ConditionType `json:"type"` - - // Status of the condition, one of True, False, Unknown. - // +required - Status corev1.ConditionStatus `json:"status"` - - // Severity provides an explicit classification of Reason code, so the users or machines can immediately - // understand the current situation and act accordingly. - // The Severity field MUST be set only when Status=False. - // +optional - Severity ConditionSeverity `json:"severity,omitempty"` - - // Last time the condition transitioned from one status to another. - // This should be when the underlying condition changed. If that is not known, then using the time when - // the API field changed is acceptable. - // +required - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - - // The reason for the condition's last transition in CamelCase. - // The specific API may choose whether or not this field is considered a guaranteed API. - // This field may not be empty. - // +optional - Reason string `json:"reason,omitempty"` - - // A human readable message indicating details about the transition. - // This field may be empty. 
- // +optional - Message string `json:"message,omitempty"` -} - -// Conditions provide observations of the operational state of a Machine API resource. -type Conditions []Condition diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go deleted file mode 100644 index 26a7ea273..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/consts.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -const ( - // PausedAnnotation is an annotation that can be applied to MachineHealthCheck objects to prevent the MHC controller - // from processing it. - PausedAnnotation = "cluster.x-k8s.io/paused" -) - -// Constants aren't automatically generated for unversioned packages. -// Instead share the same constant for all versioned packages -type MachineStatusError string - -const ( - // Represents that the combination of configuration in the MachineSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist, - InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration" - - // This indicates that the MachineSpec has been updated in a way that - // is not supported for reconciliation on this cluster. The spec may be - // completely valid from a configuration standpoint, but the controller - // does not support changing the real world state to match the new - // spec. - // - // Example: the responsible controller is not capable of changing the - // container runtime from docker to rkt. - UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange" - - // This generally refers to exceeding one's quota in a cloud provider, - // or running out of physical machines in an on-premise environment. - InsufficientResourcesMachineError MachineStatusError = "InsufficientResources" - - // There was an error while trying to create a Node to match this - // Machine. This may indicate a transient problem that will be fixed - // automatically with time, such as a service outage, or a terminal - // error during creation that doesn't match a more specific - // MachineStatusError value. - // - // Example: timeout trying to connect to GCE. - CreateMachineError MachineStatusError = "CreateError" - - // There was an error while trying to update a Node that this - // Machine represents. This may indicate a transient problem that will be - // fixed automatically with time, such as a service outage, - // - // Example: error updating load balancers - UpdateMachineError MachineStatusError = "UpdateError" - - // An error was encountered while trying to delete the Node that this - // Machine represents. 
This could be a transient or terminal error, but - // will only be observable if the provider's Machine controller has - // added a finalizer to the object to more gracefully handle deletions. - // - // Example: cannot resolve EC2 IP address. - DeleteMachineError MachineStatusError = "DeleteError" - - // TemplateClonedFromGroupKindAnnotation is the infrastructure machine - // annotation that stores the group-kind of the infrastructure template resource - // that was cloned for the machine. This annotation is set only during cloning a - // template. Older/adopted machines will not have this annotation. - TemplateClonedFromGroupKindAnnotation = "machine.openshift.io/cloned-from-groupkind" - - // TemplateClonedFromNameAnnotation is the infrastructure machine annotation that - // stores the name of the infrastructure template resource - // that was cloned for the machine. This annotation is set only during cloning a - // template. Older/adopted machines will not have this annotation. - TemplateClonedFromNameAnnotation = "machine.openshift.io/cloned-from-name" - - // This error indicates that the machine did not join the cluster - // as a new node within the expected timeframe after instance - // creation at the provider succeeded - // - // Example use case: A controller that deletes Machines which do - // not result in a Node joining the cluster within a given timeout - // and that are managed by a MachineSet - JoinClusterTimeoutMachineError = "JoinClusterTimeoutError" -) - -type ClusterStatusError string - -const ( - // InvalidConfigurationClusterError indicates that the cluster - // configuration is invalid. - InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration" - - // UnsupportedChangeClusterError indicates that the cluster - // spec has been updated in an unsupported way. That cannot be - // reconciled. - UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange" - - // CreateClusterError indicates that an error was encountered - // when trying to create the cluster. - CreateClusterError ClusterStatusError = "CreateError" - - // UpdateClusterError indicates that an error was encountered - // when trying to update the cluster. - UpdateClusterError ClusterStatusError = "UpdateError" - - // DeleteClusterError indicates that an error was encountered - // when trying to delete the cluster. - DeleteClusterError ClusterStatusError = "DeleteError" -) - -type MachineSetStatusError string - -const ( - // Represents that the combination of configuration in the MachineTemplateSpec - // is not supported by this cluster. This is not a transient error, but - // indicates a state that must be fixed before progress can be made. - // - // Example: the ProviderSpec specifies an instance type that doesn't exist. - InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration" -) - -type MachineDeploymentStrategyType string - -const ( - // Replace the old MachineSet by new one using rolling update - // i.e. gradually scale down the old MachineSet and scale up the new one. 
- RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate" -) diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/doc.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/doc.go deleted file mode 100644 index fb656f231..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package v1alpha1 contains API Schema definitions for the healthchecking v1beta1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=machine.openshift.io -package v1beta1 diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go deleted file mode 100644 index bea369f5a..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machine_webhook.go +++ /dev/null @@ -1,1173 +0,0 @@ -package v1beta1 - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "runtime" - "strings" - - osconfigv1 "github.com/openshift/api/config/v1" - osclientset "github.com/openshift/client-go/config/clientset/versioned" - gcp "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1" - "github.com/openshift/machine-api-operator/pkg/apis/machine" - vsphere "github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kruntime "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/klog/v2" - "k8s.io/utils/pointer" - aws "sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1" - azure "sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - yaml "sigs.k8s.io/yaml" -) - -var ( - // Azure Defaults - defaultAzureVnet = func(clusterID string) string { - return fmt.Sprintf("%s-vnet", clusterID) - } - defaultAzureSubnet = func(clusterID string) string { - return fmt.Sprintf("%s-worker-subnet", clusterID) - } - defaultAzureNetworkResourceGroup = func(clusterID string) string { - return fmt.Sprintf("%s-rg", clusterID) - } - defaultAzureImageResourceID = func(clusterID string) string { - return fmt.Sprintf("/resourceGroups/%s/providers/Microsoft.Compute/images/%s", clusterID+"-rg", clusterID) - } - defaultAzureManagedIdentiy = func(clusterID string) string { - return fmt.Sprintf("%s-identity", clusterID) - } - defaultAzureResourceGroup = func(clusterID string) string { - return fmt.Sprintf("%s-rg", clusterID) - } - - // GCP Defaults - defaultGCPNetwork = func(clusterID string) string { - return fmt.Sprintf("%s-network", clusterID) - } - defaultGCPSubnetwork = func(clusterID string) string { - return fmt.Sprintf("%s-worker-subnet", clusterID) - } - defaultGCPTags = func(clusterID string) []string { - return []string{fmt.Sprintf("%s-worker", clusterID)} - } -) - -const ( - DefaultMachineMutatingHookPath = "/mutate-machine-openshift-io-v1beta1-machine" - DefaultMachineValidatingHookPath = "/validate-machine-openshift-io-v1beta1-machine" - DefaultMachineSetMutatingHookPath = 
"/mutate-machine-openshift-io-v1beta1-machineset" - DefaultMachineSetValidatingHookPath = "/validate-machine-openshift-io-v1beta1-machineset" - - defaultWebhookConfigurationName = "machine-api" - defaultWebhookServiceName = "machine-api-operator-webhook" - defaultWebhookServiceNamespace = "openshift-machine-api" - defaultWebhookServicePort = 443 - - defaultUserDataSecret = "worker-user-data" - defaultSecretNamespace = "openshift-machine-api" - - // AWS Defaults - defaultAWSCredentialsSecret = "aws-cloud-credentials" - defaultAWSX86InstanceType = "m5.large" - defaultAWSARMInstanceType = "m6g.large" - - // Azure Defaults - defaultAzureVMSize = "Standard_D4s_V3" - defaultAzureCredentialsSecret = "azure-cloud-credentials" - defaultAzureOSDiskOSType = "Linux" - defaultAzureOSDiskStorageType = "Premium_LRS" - azureMaxDiskSizeGB = 32768 - - // GCP Defaults - defaultGCPMachineType = "n1-standard-4" - defaultGCPCredentialsSecret = "gcp-cloud-credentials" - defaultGCPDiskSizeGb = 128 - defaultGCPDiskType = "pd-standard" - // https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.8/48.83.202103122318-0/x86_64/meta.json - // https://github.com/openshift/installer/blob/796a99049d3b7489b6c08ec5bd7c7983731afbcf/data/data/rhcos.json#L90-L94 - defaultGCPDiskImage = "projects/rhcos-cloud/global/images/rhcos-48-83-202103221318-0-gcp-x86-64" - - // vSphere Defaults - defaultVSphereCredentialsSecret = "vsphere-cloud-credentials" - // Minimum vSphere values taken from vSphere reconciler - minVSphereCPU = 2 - minVSphereMemoryMiB = 2048 - // https://docs.openshift.com/container-platform/4.1/installing/installing_vsphere/installing-vsphere.html#minimum-resource-requirements_installing-vsphere - minVSphereDiskGiB = 120 -) - -var ( - // webhookFailurePolicy is ignore so we don't want to block machine lifecycle on the webhook operational aspects. - // This would be particularly problematic for chicken egg issues when bootstrapping a cluster. - webhookFailurePolicy = admissionregistrationv1.Ignore - webhookSideEffects = admissionregistrationv1.SideEffectClassNone -) - -func secretExists(c client.Client, name, namespace string) (bool, error) { - key := client.ObjectKey{ - Name: name, - Namespace: namespace, - } - obj := &corev1.Secret{} - - if err := c.Get(context.Background(), key, obj); err != nil { - if apierrors.IsNotFound(err) { - return false, nil - } - return false, err - } - return true, nil -} - -func credentialsSecretExists(c client.Client, name, namespace string) []string { - secretExists, err := secretExists(c, name, namespace) - if err != nil { - return []string{ - field.Invalid( - field.NewPath("providerSpec", "credentialsSecret"), - name, - fmt.Sprintf("failed to get credentialsSecret: %v", err), - ).Error(), - } - } - - if !secretExists { - return []string{ - field.Invalid( - field.NewPath("providerSpec", "credentialsSecret"), - name, - "not found. 
Expected CredentialsSecret to exist", - ).Error(), - } - } - - return []string{} -} - -func getInfra() (*osconfigv1.Infrastructure, error) { - cfg, err := ctrl.GetConfig() - if err != nil { - return nil, err - } - client, err := osclientset.NewForConfig(cfg) - if err != nil { - return nil, err - } - infra, err := client.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) - if err != nil { - return nil, err - } - return infra, nil -} - -func getDNS() (*osconfigv1.DNS, error) { - cfg, err := ctrl.GetConfig() - if err != nil { - return nil, err - } - client, err := osclientset.NewForConfig(cfg) - if err != nil { - return nil, err - } - dns, err := client.ConfigV1().DNSes().Get(context.Background(), "cluster", metav1.GetOptions{}) - if err != nil { - return nil, err - } - - return dns, nil -} - -type machineAdmissionFn func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) - -type admissionConfig struct { - clusterID string - platformStatus *osconfigv1.PlatformStatus - dnsDisconnected bool - client client.Client -} - -type admissionHandler struct { - *admissionConfig - webhookOperations machineAdmissionFn - decoder *admission.Decoder -} - -// InjectDecoder injects the decoder. -func (a *admissionHandler) InjectDecoder(d *admission.Decoder) error { - a.decoder = d - return nil -} - -// machineValidatorHandler validates Machine API resources. -// implements type Handler interface. -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler -type machineValidatorHandler struct { - *admissionHandler -} - -// machineDefaulterHandler defaults Machine API resources. -// implements type Handler interface. -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler -type machineDefaulterHandler struct { - *admissionHandler -} - -// NewValidator returns a new machineValidatorHandler. -func NewMachineValidator(client client.Client) (*machineValidatorHandler, error) { - infra, err := getInfra() - if err != nil { - return nil, err - } - - dns, err := getDNS() - if err != nil { - return nil, err - } - - return createMachineValidator(infra, client, dns), nil -} - -func createMachineValidator(infra *osconfigv1.Infrastructure, client client.Client, dns *osconfigv1.DNS) *machineValidatorHandler { - admissionConfig := &admissionConfig{ - dnsDisconnected: dns.Spec.PublicZone == nil, - clusterID: infra.Status.InfrastructureName, - platformStatus: infra.Status.PlatformStatus, - client: client, - } - return &machineValidatorHandler{ - admissionHandler: &admissionHandler{ - admissionConfig: admissionConfig, - webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), - }, - } -} - -func getMachineValidatorOperation(platform osconfigv1.PlatformType) machineAdmissionFn { - switch platform { - case osconfigv1.AWSPlatformType: - return validateAWS - case osconfigv1.AzurePlatformType: - return validateAzure - case osconfigv1.GCPPlatformType: - return validateGCP - case osconfigv1.VSpherePlatformType: - return validateVSphere - default: - // just no-op - return func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - return true, []string{}, nil - } - } -} - -// NewDefaulter returns a new machineDefaulterHandler. 
-func NewMachineDefaulter() (*machineDefaulterHandler, error) { - infra, err := getInfra() - if err != nil { - return nil, err - } - - return createMachineDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil -} - -func createMachineDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineDefaulterHandler { - return &machineDefaulterHandler{ - admissionHandler: &admissionHandler{ - admissionConfig: &admissionConfig{clusterID: clusterID}, - webhookOperations: getMachineDefaulterOperation(platformStatus), - }, - } -} - -func getMachineDefaulterOperation(platformStatus *osconfigv1.PlatformStatus) machineAdmissionFn { - switch platformStatus.Type { - case osconfigv1.AWSPlatformType: - region := "" - if platformStatus.AWS != nil { - region = platformStatus.AWS.Region - } - arch := runtime.GOARCH - return awsDefaulter{region: region, arch: arch}.defaultAWS - case osconfigv1.AzurePlatformType: - return defaultAzure - case osconfigv1.GCPPlatformType: - return defaultGCP - case osconfigv1.VSpherePlatformType: - return defaultVSphere - default: - // just no-op - return func(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - return true, []string{}, nil - } - } -} - -// NewValidatingWebhookConfiguration creates a validation webhook configuration with configured Machine and MachineSet webhooks -func NewValidatingWebhookConfiguration() *admissionregistrationv1.ValidatingWebhookConfiguration { - validatingWebhookConfiguration := &admissionregistrationv1.ValidatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: defaultWebhookConfigurationName, - Annotations: map[string]string{ - "service.beta.openshift.io/inject-cabundle": "true", - }, - }, - Webhooks: []admissionregistrationv1.ValidatingWebhook{ - MachineValidatingWebhook(), - MachineSetValidatingWebhook(), - }, - } - - // Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings - // Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do. 
- validatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind("ValidatingWebhookConfiguration")) - return validatingWebhookConfiguration -} - -// MachineValidatingWebhook returns validating webhooks for machine to populate the configuration -func MachineValidatingWebhook() admissionregistrationv1.ValidatingWebhook { - serviceReference := admissionregistrationv1.ServiceReference{ - Namespace: defaultWebhookServiceNamespace, - Name: defaultWebhookServiceName, - Path: pointer.StringPtr(DefaultMachineValidatingHookPath), - Port: pointer.Int32Ptr(defaultWebhookServicePort), - } - return admissionregistrationv1.ValidatingWebhook{ - AdmissionReviewVersions: []string{"v1"}, - Name: "validation.machine.machine.openshift.io", - FailurePolicy: &webhookFailurePolicy, - SideEffects: &webhookSideEffects, - ClientConfig: admissionregistrationv1.WebhookClientConfig{ - Service: &serviceReference, - }, - Rules: []admissionregistrationv1.RuleWithOperations{ - { - Rule: admissionregistrationv1.Rule{ - APIGroups: []string{machine.GroupName}, - APIVersions: []string{SchemeGroupVersion.Version}, - Resources: []string{"machines"}, - }, - Operations: []admissionregistrationv1.OperationType{ - admissionregistrationv1.Create, - admissionregistrationv1.Update, - }, - }, - }, - } -} - -// MachineSetValidatingWebhook returns validating webhooks for machineSet to populate the configuration -func MachineSetValidatingWebhook() admissionregistrationv1.ValidatingWebhook { - machinesetServiceReference := admissionregistrationv1.ServiceReference{ - Namespace: defaultWebhookServiceNamespace, - Name: defaultWebhookServiceName, - Path: pointer.StringPtr(DefaultMachineSetValidatingHookPath), - Port: pointer.Int32Ptr(defaultWebhookServicePort), - } - return admissionregistrationv1.ValidatingWebhook{ - AdmissionReviewVersions: []string{"v1"}, - Name: "validation.machineset.machine.openshift.io", - FailurePolicy: &webhookFailurePolicy, - SideEffects: &webhookSideEffects, - ClientConfig: admissionregistrationv1.WebhookClientConfig{ - Service: &machinesetServiceReference, - }, - Rules: []admissionregistrationv1.RuleWithOperations{ - { - Rule: admissionregistrationv1.Rule{ - APIGroups: []string{machine.GroupName}, - APIVersions: []string{SchemeGroupVersion.Version}, - Resources: []string{"machinesets"}, - }, - Operations: []admissionregistrationv1.OperationType{ - admissionregistrationv1.Create, - admissionregistrationv1.Update, - }, - }, - }, - } -} - -// NewMutatingWebhookConfiguration creates a mutating webhook configuration with configured Machine and MachineSet webhooks -func NewMutatingWebhookConfiguration() *admissionregistrationv1.MutatingWebhookConfiguration { - mutatingWebhookConfiguration := &admissionregistrationv1.MutatingWebhookConfiguration{ - ObjectMeta: metav1.ObjectMeta{ - Name: defaultWebhookConfigurationName, - Annotations: map[string]string{ - "service.beta.openshift.io/inject-cabundle": "true", - }, - }, - Webhooks: []admissionregistrationv1.MutatingWebhook{ - MachineMutatingWebhook(), - MachineSetMutatingWebhook(), - }, - } - - // Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings - // Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do. 
- mutatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind("MutatingWebhookConfiguration")) - return mutatingWebhookConfiguration -} - -// MachineMutatingWebhook returns mutating webhooks for machine to apply in configuration -func MachineMutatingWebhook() admissionregistrationv1.MutatingWebhook { - machineServiceReference := admissionregistrationv1.ServiceReference{ - Namespace: defaultWebhookServiceNamespace, - Name: defaultWebhookServiceName, - Path: pointer.StringPtr(DefaultMachineMutatingHookPath), - Port: pointer.Int32Ptr(defaultWebhookServicePort), - } - return admissionregistrationv1.MutatingWebhook{ - AdmissionReviewVersions: []string{"v1"}, - Name: "default.machine.machine.openshift.io", - FailurePolicy: &webhookFailurePolicy, - SideEffects: &webhookSideEffects, - ClientConfig: admissionregistrationv1.WebhookClientConfig{ - Service: &machineServiceReference, - }, - Rules: []admissionregistrationv1.RuleWithOperations{ - { - Rule: admissionregistrationv1.Rule{ - APIGroups: []string{machine.GroupName}, - APIVersions: []string{SchemeGroupVersion.Version}, - Resources: []string{"machines"}, - }, - Operations: []admissionregistrationv1.OperationType{ - admissionregistrationv1.Create, - }, - }, - }, - } -} - -// MachineSetMutatingWebhook returns mutating webhook for machineSet to apply in configuration -func MachineSetMutatingWebhook() admissionregistrationv1.MutatingWebhook { - machineSetServiceReference := admissionregistrationv1.ServiceReference{ - Namespace: defaultWebhookServiceNamespace, - Name: defaultWebhookServiceName, - Path: pointer.StringPtr(DefaultMachineSetMutatingHookPath), - Port: pointer.Int32Ptr(defaultWebhookServicePort), - } - return admissionregistrationv1.MutatingWebhook{ - AdmissionReviewVersions: []string{"v1"}, - Name: "default.machineset.machine.openshift.io", - FailurePolicy: &webhookFailurePolicy, - SideEffects: &webhookSideEffects, - ClientConfig: admissionregistrationv1.WebhookClientConfig{ - Service: &machineSetServiceReference, - }, - Rules: []admissionregistrationv1.RuleWithOperations{ - { - Rule: admissionregistrationv1.Rule{ - APIGroups: []string{machine.GroupName}, - APIVersions: []string{SchemeGroupVersion.Version}, - Resources: []string{"machinesets"}, - }, - Operations: []admissionregistrationv1.OperationType{ - admissionregistrationv1.Create, - }, - }, - }, - } -} - -// Handle handles HTTP requests for admission webhook servers. -func (h *machineValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { - m := &Machine{} - - if err := h.decoder.Decode(req, m); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - - klog.V(3).Infof("Validate webhook called for Machine: %s", m.GetName()) - - ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) - if !ok { - return admission.Denied(errs.Error()).WithWarnings(warnings...) - } - - return admission.Allowed("Machine valid").WithWarnings(warnings...) -} - -// Handle handles HTTP requests for admission webhook servers. -func (h *machineDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { - m := &Machine{} - - if err := h.decoder.Decode(req, m); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - - klog.V(3).Infof("Mutate webhook called for Machine: %s", m.GetName()) - - // Only enforce the clusterID if it's not set. 
- // Otherwise a discrepancy on the value would leave the machine orphan - // and would trigger a new machine creation by the machineSet. - // https://bugzilla.redhat.com/show_bug.cgi?id=1857175 - if m.Labels == nil { - m.Labels = make(map[string]string) - } - if _, ok := m.Labels[MachineClusterIDLabel]; !ok { - m.Labels[MachineClusterIDLabel] = h.clusterID - } - - ok, warnings, errs := h.webhookOperations(m, h.admissionConfig) - if !ok { - return admission.Denied(errs.Error()).WithWarnings(warnings...) - } - - marshaledMachine, err := json.Marshal(m) - if err != nil { - return admission.Errored(http.StatusInternalServerError, err).WithWarnings(warnings...) - } - return admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachine).WithWarnings(warnings...) -} - -type awsDefaulter struct { - region string - arch string -} - -func (a awsDefaulter) defaultAWS(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Defaulting AWS providerSpec") - - var errs []error - var warnings []string - providerSpec := new(aws.AWSMachineProviderConfig) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.InstanceType == "" { - if a.arch == "arm64" { - providerSpec.InstanceType = defaultAWSARMInstanceType - } else { - providerSpec.InstanceType = defaultAWSX86InstanceType - } - } - - if providerSpec.Placement.Region == "" { - providerSpec.Placement.Region = a.region - } - - if providerSpec.UserDataSecret == nil { - providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} - } - - if providerSpec.CredentialsSecret == nil { - providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultAWSCredentialsSecret} - } - - rawBytes, err := json.Marshal(providerSpec) - if err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - - m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} - return true, warnings, nil -} - -func unmarshalInto(m *Machine, providerSpec interface{}) error { - if m.Spec.ProviderSpec.Value == nil { - return field.Required(field.NewPath("providerSpec", "value"), "a value must be provided") - } - - if err := yaml.Unmarshal(m.Spec.ProviderSpec.Value.Raw, &providerSpec); err != nil { - return field.Invalid(field.NewPath("providerSpec", "value"), providerSpec, err.Error()) - } - return nil -} - -func validateAWS(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Validating AWS providerSpec") - - var errs []error - var warnings []string - providerSpec := new(aws.AWSMachineProviderConfig) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.AMI.ARN == nil && providerSpec.AMI.Filters == nil && providerSpec.AMI.ID == nil { - errs = append( - errs, - field.Required( - field.NewPath("providerSpec", "ami"), - "expected either providerSpec.ami.arn or providerSpec.ami.filters or providerSpec.ami.id to be populated", - ), - ) - } - - if providerSpec.Placement.Region == "" { - errs = append( - errs, - field.Required( - field.NewPath("providerSpec", "placement", "region"), - "expected providerSpec.placement.region to be populated", - ), - ) - } - - if providerSpec.InstanceType == "" { - errs = append( - errs, - field.Required( - field.NewPath("providerSpec", "instanceType"), - 
"expected providerSpec.instanceType to be populated", - ), - ) - } - - if providerSpec.UserDataSecret == nil { - errs = append( - errs, - field.Required( - field.NewPath("providerSpec", "userDataSecret"), - "expected providerSpec.userDataSecret to be populated", - ), - ) - } - - if providerSpec.CredentialsSecret == nil { - errs = append( - errs, - field.Required( - field.NewPath("providerSpec", "credentialsSecret"), - "expected providerSpec.credentialsSecret to be populated", - ), - ) - } else { - warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) - } - - if providerSpec.Subnet.ARN == nil && providerSpec.Subnet.ID == nil && providerSpec.Subnet.Filters == nil { - warnings = append( - warnings, - "providerSpec.subnet: No subnet has been provided. Instances may be created in an unexpected subnet and may not join the cluster.", - ) - } - - if providerSpec.IAMInstanceProfile == nil { - warnings = append(warnings, "providerSpec.iamInstanceProfile: no IAM instance profile provided: nodes may be unable to join the cluster") - } - - // TODO(alberto): Validate providerSpec.BlockDevices. - // https://github.com/openshift/cluster-api-provider-aws/pull/299#discussion_r433920532 - - switch providerSpec.Placement.Tenancy { - case "", aws.DefaultTenancy, aws.DedicatedTenancy, aws.HostTenancy: - // Do nothing, valid values - default: - errs = append( - errs, - field.Invalid( - field.NewPath("providerSpec", "tenancy"), - providerSpec.Placement.Tenancy, - fmt.Sprintf("Invalid providerSpec.tenancy, the only allowed options are: %s, %s, %s", aws.DefaultTenancy, aws.DedicatedTenancy, aws.HostTenancy), - ), - ) - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - - return true, warnings, nil -} - -func defaultAzure(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Defaulting Azure providerSpec") - - var errs []error - var warnings []string - providerSpec := new(azure.AzureMachineProviderSpec) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.VMSize == "" { - providerSpec.VMSize = defaultAzureVMSize - } - - // Vnet and Subnet need to be provided together by the user - if providerSpec.Vnet == "" && providerSpec.Subnet == "" { - providerSpec.Vnet = defaultAzureVnet(config.clusterID) - providerSpec.Subnet = defaultAzureSubnet(config.clusterID) - } - - if providerSpec.Image == (azure.Image{}) { - providerSpec.Image.ResourceID = defaultAzureImageResourceID(config.clusterID) - } - - if providerSpec.UserDataSecret == nil { - providerSpec.UserDataSecret = &corev1.SecretReference{Name: defaultUserDataSecret} - } else if providerSpec.UserDataSecret.Name == "" { - providerSpec.UserDataSecret.Name = defaultUserDataSecret - } - - if providerSpec.CredentialsSecret == nil { - providerSpec.CredentialsSecret = &corev1.SecretReference{Name: defaultAzureCredentialsSecret, Namespace: defaultSecretNamespace} - } else { - if providerSpec.CredentialsSecret.Namespace == "" { - providerSpec.CredentialsSecret.Namespace = defaultSecretNamespace - } - if providerSpec.CredentialsSecret.Name == "" { - providerSpec.CredentialsSecret.Name = defaultAzureCredentialsSecret - } - } - - rawBytes, err := json.Marshal(providerSpec) - if err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - - 
m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} - return true, warnings, nil -} - -func validateAzure(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Validating Azure providerSpec") - - var errs []error - var warnings []string - providerSpec := new(azure.AzureMachineProviderSpec) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.VMSize == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "vmSize"), "vmSize should be set to one of the supported Azure VM sizes")) - } - - if providerSpec.PublicIP && config.dnsDisconnected { - errs = append(errs, field.Forbidden(field.NewPath("providerSpec", "publicIP"), "publicIP is not allowed in Azure disconnected installation")) - } - // Vnet requires Subnet - if providerSpec.Vnet != "" && providerSpec.Subnet == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "subnet"), "must provide a subnet when a virtual network is specified")) - } - - // Subnet requires Vnet - if providerSpec.Subnet != "" && providerSpec.Vnet == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "vnet"), "must provide a virtual network when supplying subnets")) - } - - errs = append(errs, validateAzureImage(providerSpec.Image)...) - - if providerSpec.UserDataSecret == nil { - errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) - } else if providerSpec.UserDataSecret.Name == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) - } - - if providerSpec.CredentialsSecret == nil { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) - } else { - if providerSpec.CredentialsSecret.Namespace == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "namespace"), "namespace must be provided")) - } - if providerSpec.CredentialsSecret.Name == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) - } - if providerSpec.CredentialsSecret.Name != "" && providerSpec.CredentialsSecret.Namespace != "" { - warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, providerSpec.CredentialsSecret.Namespace)...) 
- } - } - - if providerSpec.OSDisk.DiskSizeGB <= 0 || providerSpec.OSDisk.DiskSizeGB >= azureMaxDiskSizeGB { - errs = append(errs, field.Invalid(field.NewPath("providerSpec", "osDisk", "diskSizeGB"), providerSpec.OSDisk.DiskSizeGB, "diskSizeGB must be greater than zero and less than 32768")) - } - - if isAzureGovCloud(config.platformStatus) && providerSpec.SpotVMOptions != nil { - warnings = append(warnings, "spot VMs may not be supported when using GovCloud region") - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - return true, warnings, nil -} - -func validateAzureImage(image azure.Image) []error { - errors := []error{} - if image == (azure.Image{}) { - return append(errors, field.Required(field.NewPath("providerSpec", "image"), "an image reference must be provided")) - } - - if image.ResourceID != "" { - if image != (azure.Image{ResourceID: image.ResourceID}) { - return append(errors, field.Required(field.NewPath("providerSpec", "image", "resourceID"), "resourceID is already specified, other fields such as [Offer, Publisher, SKU, Version] should not be set")) - } - return errors - } - - // Resource ID not provided, so Offer, Publisher, SKU and Version are required - if image.Offer == "" { - errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Offer"), "Offer must be provided")) - } - if image.Publisher == "" { - errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Publisher"), "Publisher must be provided")) - } - if image.SKU == "" { - errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "SKU"), "SKU must be provided")) - } - if image.Version == "" { - errors = append(errors, field.Required(field.NewPath("providerSpec", "image", "Version"), "Version must be provided")) - } - - return errors -} - -func defaultGCP(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Defaulting GCP providerSpec") - - var errs []error - var warnings []string - providerSpec := new(gcp.GCPMachineProviderSpec) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.MachineType == "" { - providerSpec.MachineType = defaultGCPMachineType - } - - if len(providerSpec.NetworkInterfaces) == 0 { - providerSpec.NetworkInterfaces = append(providerSpec.NetworkInterfaces, &gcp.GCPNetworkInterface{ - Network: defaultGCPNetwork(config.clusterID), - Subnetwork: defaultGCPSubnetwork(config.clusterID), - }) - } - - providerSpec.Disks = defaultGCPDisks(providerSpec.Disks, config.clusterID) - - if len(providerSpec.Tags) == 0 { - providerSpec.Tags = defaultGCPTags(config.clusterID) - } - - if providerSpec.UserDataSecret == nil { - providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} - } - - if providerSpec.CredentialsSecret == nil { - providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultGCPCredentialsSecret} - } - - rawBytes, err := json.Marshal(providerSpec) - if err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - - m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} - return true, warnings, nil -} - -func defaultGCPDisks(disks []*gcp.GCPDisk, clusterID string) []*gcp.GCPDisk { - if len(disks) == 0 { - return []*gcp.GCPDisk{ - { - AutoDelete: true, - Boot: true, - SizeGb: defaultGCPDiskSizeGb, - Type: defaultGCPDiskType, 
- Image: defaultGCPDiskImage, - }, - } - } - - for _, disk := range disks { - if disk.Type == "" { - disk.Type = defaultGCPDiskType - } - - if disk.Image == "" { - disk.Image = defaultGCPDiskImage - } - } - - return disks -} - -func validateGCP(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Validating GCP providerSpec") - - var errs []error - var warnings []string - providerSpec := new(gcp.GCPMachineProviderSpec) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.Region == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "region"), "region is required")) - } - - if !strings.HasPrefix(providerSpec.Zone, providerSpec.Region) { - errs = append(errs, field.Invalid(field.NewPath("providerSpec", "zone"), providerSpec.Zone, fmt.Sprintf("zone not in configured region (%s)", providerSpec.Region))) - } - - if providerSpec.MachineType == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "machineType"), "machineType should be set to one of the supported GCP machine types")) - } - - errs = append(errs, validateGCPNetworkInterfaces(providerSpec.NetworkInterfaces, field.NewPath("providerSpec", "networkInterfaces"))...) - errs = append(errs, validateGCPDisks(providerSpec.Disks, field.NewPath("providerSpec", "disks"))...) - - if len(providerSpec.ServiceAccounts) == 0 { - warnings = append(warnings, "providerSpec.serviceAccounts: no service account provided: nodes may be unable to join the cluster") - } else { - errs = append(errs, validateGCPServiceAccounts(providerSpec.ServiceAccounts, field.NewPath("providerSpec", "serviceAccounts"))...) - } - - if providerSpec.UserDataSecret == nil { - errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) - } else { - if providerSpec.UserDataSecret.Name == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) - } - } - - if providerSpec.CredentialsSecret == nil { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) - } else { - if providerSpec.CredentialsSecret.Name == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) - } else { - warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) 
- } - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - return true, warnings, nil -} - -func validateGCPNetworkInterfaces(networkInterfaces []*gcp.GCPNetworkInterface, parentPath *field.Path) []error { - if len(networkInterfaces) == 0 { - return []error{field.Required(parentPath, "at least 1 network interface is required")} - } - - var errs []error - for i, ni := range networkInterfaces { - fldPath := parentPath.Index(i) - - if ni.Network == "" { - errs = append(errs, field.Required(fldPath.Child("network"), "network is required")) - } - - if ni.Subnetwork == "" { - errs = append(errs, field.Required(fldPath.Child("subnetwork"), "subnetwork is required")) - } - } - - return errs -} - -func validateGCPDisks(disks []*gcp.GCPDisk, parentPath *field.Path) []error { - if len(disks) == 0 { - return []error{field.Required(parentPath, "at least 1 disk is required")} - } - - var errs []error - for i, disk := range disks { - fldPath := parentPath.Index(i) - - if disk.SizeGb != 0 { - if disk.SizeGb < 16 { - errs = append(errs, field.Invalid(fldPath.Child("sizeGb"), disk.SizeGb, "must be at least 16GB in size")) - } else if disk.SizeGb > 65536 { - errs = append(errs, field.Invalid(fldPath.Child("sizeGb"), disk.SizeGb, "exceeding maximum GCP disk size limit, must be below 65536")) - } - } - - if disk.Type != "" { - diskTypes := sets.NewString("pd-standard", "pd-ssd") - if !diskTypes.Has(disk.Type) { - errs = append(errs, field.NotSupported(fldPath.Child("type"), disk.Type, diskTypes.List())) - } - } - } - - return errs -} - -func validateGCPServiceAccounts(serviceAccounts []gcp.GCPServiceAccount, parentPath *field.Path) []error { - if len(serviceAccounts) != 1 { - return []error{field.Invalid(parentPath, fmt.Sprintf("%d service accounts supplied", len(serviceAccounts)), "exactly 1 service account must be supplied")} - } - - var errs []error - for i, serviceAccount := range serviceAccounts { - fldPath := parentPath.Index(i) - - if serviceAccount.Email == "" { - errs = append(errs, field.Required(fldPath.Child("email"), "email is required")) - } - - if len(serviceAccount.Scopes) == 0 { - errs = append(errs, field.Required(fldPath.Child("scopes"), "at least 1 scope is required")) - } - } - return errs -} - -func defaultVSphere(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Defaulting vSphere providerSpec") - - var errs []error - var warnings []string - providerSpec := new(vsphere.VSphereMachineProviderSpec) - if err := unmarshalInto(m, providerSpec); err != nil { - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.UserDataSecret == nil { - providerSpec.UserDataSecret = &corev1.LocalObjectReference{Name: defaultUserDataSecret} - } - - if providerSpec.CredentialsSecret == nil { - providerSpec.CredentialsSecret = &corev1.LocalObjectReference{Name: defaultVSphereCredentialsSecret} - } - - rawBytes, err := json.Marshal(providerSpec) - if err != nil { - errs = append(errs, err) - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - - m.Spec.ProviderSpec.Value = &kruntime.RawExtension{Raw: rawBytes} - return true, warnings, nil -} - -func validateVSphere(m *Machine, config *admissionConfig) (bool, []string, utilerrors.Aggregate) { - klog.V(3).Infof("Validating vSphere providerSpec") - - var errs []error - var warnings []string - providerSpec := new(vsphere.VSphereMachineProviderSpec) - if err := unmarshalInto(m, providerSpec); err != nil 
{ - errs = append(errs, err) - return false, warnings, utilerrors.NewAggregate(errs) - } - - if providerSpec.Template == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "template"), "template must be provided")) - } - - workspaceWarnings, workspaceErrors := validateVSphereWorkspace(providerSpec.Workspace, field.NewPath("providerSpec", "workspace")) - warnings = append(warnings, workspaceWarnings...) - errs = append(errs, workspaceErrors...) - - errs = append(errs, validateVSphereNetwork(providerSpec.Network, field.NewPath("providerSpec", "network"))...) - - if providerSpec.NumCPUs < minVSphereCPU { - warnings = append(warnings, fmt.Sprintf("providerSpec.numCPUs: %d is missing or less than the minimum value (%d): nodes may not boot correctly", providerSpec.NumCPUs, minVSphereCPU)) - } - if providerSpec.MemoryMiB < minVSphereMemoryMiB { - warnings = append(warnings, fmt.Sprintf("providerSpec.memoryMiB: %d is missing or less than the recommended minimum value (%d): nodes may not boot correctly", providerSpec.MemoryMiB, minVSphereMemoryMiB)) - } - if providerSpec.DiskGiB < minVSphereDiskGiB { - warnings = append(warnings, fmt.Sprintf("providerSpec.diskGiB: %d is missing or less than the recommended minimum (%d): nodes may fail to start if disk size is too low", providerSpec.DiskGiB, minVSphereDiskGiB)) - } - - if providerSpec.UserDataSecret == nil { - errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret"), "userDataSecret must be provided")) - } else { - if providerSpec.UserDataSecret.Name == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "userDataSecret", "name"), "name must be provided")) - } - } - - if providerSpec.CredentialsSecret == nil { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret"), "credentialsSecret must be provided")) - } else { - if providerSpec.CredentialsSecret.Name == "" { - errs = append(errs, field.Required(field.NewPath("providerSpec", "credentialsSecret", "name"), "name must be provided")) - } else { - warnings = append(warnings, credentialsSecretExists(config.client, providerSpec.CredentialsSecret.Name, m.GetNamespace())...) 
- } - } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - return true, warnings, nil -} - -func validateVSphereWorkspace(workspace *vsphere.Workspace, parentPath *field.Path) ([]string, []error) { - if workspace == nil { - return []string{}, []error{field.Required(parentPath, "workspace must be provided")} - } - - var errs []error - var warnings []string - if workspace.Server == "" { - errs = append(errs, field.Required(parentPath.Child("server"), "server must be provided")) - } - if workspace.Datacenter == "" { - warnings = append(warnings, fmt.Sprintf("%s: datacenter is unset: if more than one datacenter is present, VMs cannot be created", parentPath.Child("datacenter"))) - } - if workspace.Folder != "" { - expectedPrefix := fmt.Sprintf("/%s/vm/", workspace.Datacenter) - if !strings.HasPrefix(workspace.Folder, expectedPrefix) { - errMsg := fmt.Sprintf("folder must be absolute path: expected prefix %q", expectedPrefix) - errs = append(errs, field.Invalid(parentPath.Child("folder"), workspace.Folder, errMsg)) - } - } - - return warnings, errs -} - -func validateVSphereNetwork(network vsphere.NetworkSpec, parentPath *field.Path) []error { - if len(network.Devices) == 0 { - return []error{field.Required(parentPath.Child("devices"), "at least 1 network device must be provided")} - } - - var errs []error - for i, spec := range network.Devices { - fldPath := parentPath.Child("devices").Index(i) - if spec.NetworkName == "" { - errs = append(errs, field.Required(fldPath.Child("networkName"), "networkName must be provided")) - } - } - - return errs -} - -func isAzureGovCloud(platformStatus *osconfigv1.PlatformStatus) bool { - return platformStatus != nil && platformStatus.Azure != nil && - platformStatus.Azure.CloudName != osconfigv1.AzurePublicCloud -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go deleted file mode 100644 index 7d71ed541..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/machineset_webhook.go +++ /dev/null @@ -1,150 +0,0 @@ -package v1beta1 - -import ( - "context" - "encoding/json" - "net/http" - - osconfigv1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" -) - -// machineSetValidatorHandler validates MachineSet API resources. -// implements type Handler interface. -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler -type machineSetValidatorHandler struct { - *admissionHandler -} - -// machineSetDefaulterHandler defaults MachineSet API resources. -// implements type Handler interface. -// https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/webhook/admission#Handler -type machineSetDefaulterHandler struct { - *admissionHandler -} - -// NewMachineSetValidator returns a new machineSetValidatorHandler. 
-func NewMachineSetValidator(client client.Client) (*machineSetValidatorHandler, error) { - infra, err := getInfra() - if err != nil { - return nil, err - } - - dns, err := getDNS() - if err != nil { - return nil, err - } - - return createMachineSetValidator(infra, client, dns), nil -} - -func createMachineSetValidator(infra *osconfigv1.Infrastructure, client client.Client, dns *osconfigv1.DNS) *machineSetValidatorHandler { - admissionConfig := &admissionConfig{ - dnsDisconnected: dns.Spec.PublicZone == nil, - clusterID: infra.Status.InfrastructureName, - client: client, - } - return &machineSetValidatorHandler{ - admissionHandler: &admissionHandler{ - admissionConfig: admissionConfig, - webhookOperations: getMachineValidatorOperation(infra.Status.PlatformStatus.Type), - }, - } -} - -// NewMachineSetDefaulter returns a new machineSetDefaulterHandler. -func NewMachineSetDefaulter() (*machineSetDefaulterHandler, error) { - infra, err := getInfra() - if err != nil { - return nil, err - } - - return createMachineSetDefaulter(infra.Status.PlatformStatus, infra.Status.InfrastructureName), nil -} - -func createMachineSetDefaulter(platformStatus *osconfigv1.PlatformStatus, clusterID string) *machineSetDefaulterHandler { - return &machineSetDefaulterHandler{ - admissionHandler: &admissionHandler{ - admissionConfig: &admissionConfig{clusterID: clusterID}, - webhookOperations: getMachineDefaulterOperation(platformStatus), - }, - } -} - -// Handle handles HTTP requests for admission webhook servers. -func (h *machineSetValidatorHandler) Handle(ctx context.Context, req admission.Request) admission.Response { - ms := &MachineSet{} - - if err := h.decoder.Decode(req, ms); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - - klog.V(3).Infof("Validate webhook called for MachineSet: %s", ms.GetName()) - - ok, warnings, errs := h.validateMachineSet(ms) - if !ok { - return admission.Denied(errs.Error()).WithWarnings(warnings...) - } - - return admission.Allowed("MachineSet valid").WithWarnings(warnings...) -} - -// Handle handles HTTP requests for admission webhook servers. -func (h *machineSetDefaulterHandler) Handle(ctx context.Context, req admission.Request) admission.Response { - ms := &MachineSet{} - - if err := h.decoder.Decode(req, ms); err != nil { - return admission.Errored(http.StatusBadRequest, err) - } - - klog.V(3).Infof("Mutate webhook called for MachineSet: %s", ms.GetName()) - - ok, warnings, errs := h.defaultMachineSet(ms) - if !ok { - return admission.Denied(errs.Error()).WithWarnings(warnings...) - } - - marshaledMachineSet, err := json.Marshal(ms) - if err != nil { - return admission.Errored(http.StatusInternalServerError, err).WithWarnings(warnings...) - } - return admission.PatchResponseFromRaw(req.Object.Raw, marshaledMachineSet).WithWarnings(warnings...) -} - -func (h *machineSetValidatorHandler) validateMachineSet(ms *MachineSet) (bool, []string, utilerrors.Aggregate) { - var errs []error - - // Create a Machine from the MachineSet and validate the Machine template - m := &Machine{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ms.GetNamespace(), - }, - Spec: ms.Spec.Template.Spec, - } - ok, warnings, err := h.webhookOperations(m, h.admissionConfig) - if !ok { - errs = append(errs, err.Errors()...) 
- } - - if len(errs) > 0 { - return false, warnings, utilerrors.NewAggregate(errs) - } - return true, warnings, nil -} - -func (h *machineSetDefaulterHandler) defaultMachineSet(ms *MachineSet) (bool, []string, utilerrors.Aggregate) { - // Create a Machine from the MachineSet and default the Machine template - m := &Machine{Spec: ms.Spec.Template.Spec} - ok, warnings, err := h.webhookOperations(m, h.admissionConfig) - if !ok { - return false, warnings, utilerrors.NewAggregate(err.Errors()) - } - - // Restore the defaulted template - ms.Spec.Template.Spec = m.Spec - return true, warnings, nil -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/register.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/register.go deleted file mode 100644 index 7aef2f415..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/register.go +++ /dev/null @@ -1,45 +0,0 @@ -// NOTE: Boilerplate only. Ignore this file. - -// Package v1alpha1 contains API Schema definitions for the healthchecking v1beta1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=machine.openshift.io -package v1beta1 - -import ( - "github.com/openshift/machine-api-operator/pkg/apis/machine" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: machine.GroupName, Version: "v1beta1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &MachineHealthCheck{}, - &MachineHealthCheckList{}, - &Machine{}, - &MachineList{}, - &MachineSet{}, - &MachineSetList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index abbd90b22..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,571 +0,0 @@ -// +build !ignore_autogenerated - -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ - -// Code generated by controller-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Condition) DeepCopyInto(out *Condition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. -func (in *Condition) DeepCopy() *Condition { - if in == nil { - return nil - } - out := new(Condition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Conditions) DeepCopyInto(out *Conditions) { - { - in := &in - *out = make(Conditions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. -func (in Conditions) DeepCopy() Conditions { - if in == nil { - return nil - } - out := new(Conditions) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LastOperation) DeepCopyInto(out *LastOperation) { - *out = *in - if in.Description != nil { - in, out := &in.Description, &out.Description - *out = new(string) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.State != nil { - in, out := &in.State, &out.State - *out = new(string) - **out = **in - } - if in.Type != nil { - in, out := &in.Type, &out.Type - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. -func (in *LastOperation) DeepCopy() *LastOperation { - if in == nil { - return nil - } - out := new(LastOperation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Machine) DeepCopyInto(out *Machine) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine. -func (in *Machine) DeepCopy() *Machine { - if in == nil { - return nil - } - out := new(Machine) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Machine) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineHealthCheck) DeepCopyInto(out *MachineHealthCheck) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheck. 
-func (in *MachineHealthCheck) DeepCopy() *MachineHealthCheck { - if in == nil { - return nil - } - out := new(MachineHealthCheck) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineHealthCheck) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineHealthCheckList) DeepCopyInto(out *MachineHealthCheckList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineHealthCheck, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckList. -func (in *MachineHealthCheckList) DeepCopy() *MachineHealthCheckList { - if in == nil { - return nil - } - out := new(MachineHealthCheckList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineHealthCheckList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) { - *out = *in - in.Selector.DeepCopyInto(&out.Selector) - if in.UnhealthyConditions != nil { - in, out := &in.UnhealthyConditions, &out.UnhealthyConditions - *out = make([]UnhealthyCondition, len(*in)) - copy(*out, *in) - } - if in.MaxUnhealthy != nil { - in, out := &in.MaxUnhealthy, &out.MaxUnhealthy - *out = new(intstr.IntOrString) - **out = **in - } - if in.NodeStartupTimeout != nil { - in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout - *out = new(v1.Duration) - **out = **in - } - if in.RemediationTemplate != nil { - in, out := &in.RemediationTemplate, &out.RemediationTemplate - *out = new(corev1.ObjectReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec. -func (in *MachineHealthCheckSpec) DeepCopy() *MachineHealthCheckSpec { - if in == nil { - return nil - } - out := new(MachineHealthCheckSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineHealthCheckStatus) DeepCopyInto(out *MachineHealthCheckStatus) { - *out = *in - if in.ExpectedMachines != nil { - in, out := &in.ExpectedMachines, &out.ExpectedMachines - *out = new(int) - **out = **in - } - if in.CurrentHealthy != nil { - in, out := &in.CurrentHealthy, &out.CurrentHealthy - *out = new(int) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make(Conditions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckStatus. 
-func (in *MachineHealthCheckStatus) DeepCopy() *MachineHealthCheckStatus { - if in == nil { - return nil - } - out := new(MachineHealthCheckStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineList) DeepCopyInto(out *MachineList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Machine, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList. -func (in *MachineList) DeepCopy() *MachineList { - if in == nil { - return nil - } - out := new(MachineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSet) DeepCopyInto(out *MachineSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet. -func (in *MachineSet) DeepCopy() *MachineSet { - if in == nil { - return nil - } - out := new(MachineSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetList) DeepCopyInto(out *MachineSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MachineSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList. -func (in *MachineSetList) DeepCopy() *MachineSetList { - if in == nil { - return nil - } - out := new(MachineSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MachineSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - in.Selector.DeepCopyInto(&out.Selector) - in.Template.DeepCopyInto(&out.Template) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec. 
-func (in *MachineSetSpec) DeepCopy() *MachineSetSpec { - if in == nil { - return nil - } - out := new(MachineSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) { - *out = *in - if in.ErrorReason != nil { - in, out := &in.ErrorReason, &out.ErrorReason - *out = new(MachineSetStatusError) - **out = **in - } - if in.ErrorMessage != nil { - in, out := &in.ErrorMessage, &out.ErrorMessage - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus. -func (in *MachineSetStatus) DeepCopy() *MachineSetStatus { - if in == nil { - return nil - } - out := new(MachineSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Taints != nil { - in, out := &in.Taints, &out.Taints - *out = make([]corev1.Taint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.ProviderSpec.DeepCopyInto(&out.ProviderSpec) - if in.ProviderID != nil { - in, out := &in.ProviderID, &out.ProviderID - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec. -func (in *MachineSpec) DeepCopy() *MachineSpec { - if in == nil { - return nil - } - out := new(MachineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { - *out = *in - if in.NodeRef != nil { - in, out := &in.NodeRef, &out.NodeRef - *out = new(corev1.ObjectReference) - **out = **in - } - if in.LastUpdated != nil { - in, out := &in.LastUpdated, &out.LastUpdated - *out = (*in).DeepCopy() - } - if in.ErrorReason != nil { - in, out := &in.ErrorReason, &out.ErrorReason - *out = new(MachineStatusError) - **out = **in - } - if in.ErrorMessage != nil { - in, out := &in.ErrorMessage, &out.ErrorMessage - *out = new(string) - **out = **in - } - if in.ProviderStatus != nil { - in, out := &in.ProviderStatus, &out.ProviderStatus - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]corev1.NodeAddress, len(*in)) - copy(*out, *in) - } - if in.LastOperation != nil { - in, out := &in.LastOperation, &out.LastOperation - *out = new(LastOperation) - (*in).DeepCopyInto(*out) - } - if in.Phase != nil { - in, out := &in.Phase, &out.Phase - *out = new(string) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make(Conditions, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. -func (in *MachineStatus) DeepCopy() *MachineStatus { - if in == nil { - return nil - } - out := new(MachineStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec. -func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec { - if in == nil { - return nil - } - out := new(MachineTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.OwnerReferences != nil { - in, out := &in.OwnerReferences, &out.OwnerReferences - *out = make([]v1.OwnerReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. -func (in *ObjectMeta) DeepCopy() *ObjectMeta { - if in == nil { - return nil - } - out := new(ObjectMeta) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(runtime.RawExtension) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec. -func (in *ProviderSpec) DeepCopy() *ProviderSpec { - if in == nil { - return nil - } - out := new(ProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) { - *out = *in - out.Timeout = in.Timeout -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyCondition. 
-func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition { - if in == nil { - return nil - } - out := new(UnhealthyCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/doc.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/doc.go deleted file mode 100644 index d59eebf3d..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package v1beta1 contains API Schema definitions for the vsphereprovider v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider -// +k8s:defaulter-gen=TypeMeta -// +groupName=vsphereprovider.machine.openshift.io -package v1beta1 diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go deleted file mode 100644 index 741becbf3..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/register.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package v1beta1 contains API Schema definitions for the vsphereprovider v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider -// +k8s:defaulter-gen=TypeMeta -// +groupName=vsphereprovider.machine.openshift.io -package v1beta1 - -import ( - "encoding/json" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/scheme" - "sigs.k8s.io/yaml" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "vsphereprovider.openshift.io", Version: "v1beta1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) - -// RawExtensionFromProviderSpec marshals the machine provider spec. 
-func RawExtensionFromProviderSpec(spec *VSphereMachineProviderSpec) (*runtime.RawExtension, error) { - if spec == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - if rawBytes, err = json.Marshal(spec); err != nil { - return nil, fmt.Errorf("error marshalling providerSpec: %v", err) - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// RawExtensionFromProviderStatus marshals the provider status -func RawExtensionFromProviderStatus(status *VSphereMachineProviderStatus) (*runtime.RawExtension, error) { - if status == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - if rawBytes, err = json.Marshal(status); err != nil { - return nil, fmt.Errorf("error marshalling providerStatus: %v", err) - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// ProviderSpecFromRawExtension unmarshals the JSON-encoded spec -func ProviderSpecFromRawExtension(rawExtension *runtime.RawExtension) (*VSphereMachineProviderSpec, error) { - if rawExtension == nil { - return &VSphereMachineProviderSpec{}, nil - } - - spec := new(VSphereMachineProviderSpec) - if err := yaml.Unmarshal(rawExtension.Raw, &spec); err != nil { - return nil, fmt.Errorf("error unmarshalling providerSpec: %v", err) - } - - klog.V(5).Infof("Got provider spec from raw extension: %+v", spec) - return spec, nil -} - -// ProviderStatusFromRawExtension unmarshals a raw extension into a VSphereMachineProviderStatus type -func ProviderStatusFromRawExtension(rawExtension *runtime.RawExtension) (*VSphereMachineProviderStatus, error) { - if rawExtension == nil { - return &VSphereMachineProviderStatus{}, nil - } - - providerStatus := new(VSphereMachineProviderStatus) - if err := yaml.Unmarshal(rawExtension.Raw, providerStatus); err != nil { - return nil, fmt.Errorf("error unmarshalling providerStatus: %v", err) - } - - klog.V(5).Infof("Got provider Status from raw extension: %+v", providerStatus) - return providerStatus, nil -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderstatus_types.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderstatus_types.go deleted file mode 100644 index ccffbb367..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/vsphereproviderstatus_types.go +++ /dev/null @@ -1,83 +0,0 @@ -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// VSphereMachineProviderConditionType is a valid value for VSphereMachineProviderCondition.Type. -type VSphereMachineProviderConditionType string - -// Valid conditions for an vSphere machine instance. -const ( - // MachineCreation indicates whether the machine has been created or not. If not, - // it should include a reason and message for the failure. - MachineCreation VSphereMachineProviderConditionType = "MachineCreation" -) - -// VSphereMachineProviderConditionReason is reason for the condition's last transition. -type VSphereMachineProviderConditionReason string - -const ( - // MachineCreationSucceeded indicates machine creation success. - MachineCreationSucceeded VSphereMachineProviderConditionReason = "MachineCreationSucceeded" - // MachineCreationFailed indicates machine creation failure. - MachineCreationFailed VSphereMachineProviderConditionReason = "MachineCreationFailed" -) - -// VSphereMachineProviderCondition is a condition in a VSphereMachineProviderStatus. 
-type VSphereMachineProviderCondition struct { - // Type is the type of the condition. - Type VSphereMachineProviderConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason VSphereMachineProviderConditionReason `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. -// It contains VSphere-specific status information. -// +k8s:openapi-gen=true -type VSphereMachineProviderStatus struct { - metav1.TypeMeta `json:",inline"` - - // TODO: populate what we need here: - // InstanceID is the ID of the instance in VSphere - // +optional - InstanceID *string `json:"instanceId,omitempty"` - - // InstanceState is the provisioning state of the VSphere Instance. - // +optional - InstanceState *string `json:"instanceState,omitempty"` - // - // TaskRef? - // Ready? - // Conditions is a set of conditions associated with the Machine to indicate - // errors or other status - Conditions []VSphereMachineProviderCondition `json:"conditions,omitempty"` - - // TaskRef is a managed object reference to a Task related to the machine. - // This value is set automatically at runtime and should not be set or - // modified by users. - // +optional - TaskRef string `json:"taskRef,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -func init() { - SchemeBuilder.Register(&VSphereMachineProviderStatus{}) -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 36ab434bc..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,176 +0,0 @@ -// +build !ignore_autogenerated - -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec. -func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec { - if in == nil { - return nil - } - out := new(NetworkDeviceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { - *out = *in - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]NetworkDeviceSpec, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. -func (in *NetworkSpec) DeepCopy() *NetworkSpec { - if in == nil { - return nil - } - out := new(NetworkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSphereMachineProviderCondition) DeepCopyInto(out *VSphereMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderCondition. -func (in *VSphereMachineProviderCondition) DeepCopy() *VSphereMachineProviderCondition { - if in == nil { - return nil - } - out := new(VSphereMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.UserDataSecret != nil { - in, out := &in.UserDataSecret, &out.UserDataSecret - *out = new(v1.LocalObjectReference) - **out = **in - } - if in.CredentialsSecret != nil { - in, out := &in.CredentialsSecret, &out.CredentialsSecret - *out = new(v1.LocalObjectReference) - **out = **in - } - if in.Workspace != nil { - in, out := &in.Workspace, &out.Workspace - *out = new(Workspace) - **out = **in - } - in.Network.DeepCopyInto(&out.Network) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderSpec. -func (in *VSphereMachineProviderSpec) DeepCopy() *VSphereMachineProviderSpec { - if in == nil { - return nil - } - out := new(VSphereMachineProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VSphereMachineProviderSpec) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProviderStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.InstanceID != nil { - in, out := &in.InstanceID, &out.InstanceID - *out = new(string) - **out = **in - } - if in.InstanceState != nil { - in, out := &in.InstanceState, &out.InstanceState - *out = new(string) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]VSphereMachineProviderCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderStatus. -func (in *VSphereMachineProviderStatus) DeepCopy() *VSphereMachineProviderStatus { - if in == nil { - return nil - } - out := new(VSphereMachineProviderStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VSphereMachineProviderStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Workspace) DeepCopyInto(out *Workspace) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. -func (in *Workspace) DeepCopy() *Workspace { - if in == nil { - return nil - } - out := new(Workspace) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme/doc.go deleted file mode 100644 index 35df78e9c..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/doc.go b/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/doc.go deleted file mode 100644 index 813ac9eb6..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/generated_expansion.go b/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/generated_expansion.go deleted file mode 100644 index d3ff3bc39..000000000 --- a/vendor/github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1/generated_expansion.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2019 Red Hat, Inc. - * - */ -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type MachineExpansion interface{} - -type MachineHealthCheckExpansion interface{} - -type MachineSetExpansion interface{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go deleted file mode 100644 index bd82a9d65..000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ /dev/null @@ -1,271 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "regexp" - "unicode" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" -) - -func ValidateLabelSelector(ps *metav1.LabelSelector, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - if ps == nil { - return allErrs - } - allErrs = append(allErrs, ValidateLabels(ps.MatchLabels, fldPath.Child("matchLabels"))...) - for i, expr := range ps.MatchExpressions { - allErrs = append(allErrs, ValidateLabelSelectorRequirement(expr, fldPath.Child("matchExpressions").Index(i))...) 
- } - return allErrs -} - -func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - switch sr.Operator { - case metav1.LabelSelectorOpIn, metav1.LabelSelectorOpNotIn: - if len(sr.Values) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) - } - case metav1.LabelSelectorOpExists, metav1.LabelSelectorOpDoesNotExist: - if len(sr.Values) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) - } - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operator"), sr.Operator, "not a valid selector operator")) - } - allErrs = append(allErrs, ValidateLabelName(sr.Key, fldPath.Child("key"))...) - return allErrs -} - -// ValidateLabelName validates that the label name is correctly defined. -func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for _, msg := range validation.IsQualifiedName(labelName) { - allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) - } - return allErrs -} - -// ValidateLabels validates that a set of labels are correctly defined. -func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - for k, v := range labels { - allErrs = append(allErrs, ValidateLabelName(k, fldPath)...) - for _, msg := range validation.IsValidLabelValue(v) { - allErrs = append(allErrs, field.Invalid(fldPath, v, msg)) - } - } - return allErrs -} - -func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList { - allErrs := field.ErrorList{} - //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed - if options.OrphanDependents != nil && options.PropagationPolicy != nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set")) - } - if options.PropagationPolicy != nil && - *options.PropagationPolicy != metav1.DeletePropagationForeground && - *options.PropagationPolicy != metav1.DeletePropagationBackground && - *options.PropagationPolicy != metav1.DeletePropagationOrphan { - allErrs = append(allErrs, field.NotSupported(field.NewPath("propagationPolicy"), options.PropagationPolicy, []string{string(metav1.DeletePropagationForeground), string(metav1.DeletePropagationBackground), string(metav1.DeletePropagationOrphan), "nil"})) - } - allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) 
- return allErrs -} - -func ValidateCreateOptions(options *metav1.CreateOptions) field.ErrorList { - return append( - ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), - ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., - ) -} - -func ValidateUpdateOptions(options *metav1.UpdateOptions) field.ErrorList { - return append( - ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager")), - ValidateDryRun(field.NewPath("dryRun"), options.DryRun)..., - ) -} - -func ValidatePatchOptions(options *metav1.PatchOptions, patchType types.PatchType) field.ErrorList { - allErrs := field.ErrorList{} - if patchType != types.ApplyPatchType { - if options.Force != nil { - allErrs = append(allErrs, field.Forbidden(field.NewPath("force"), "may not be specified for non-apply patch")) - } - } else { - if options.FieldManager == "" { - // This field is defaulted to "kubectl" by kubectl, but HAS TO be explicitly set by controllers. - allErrs = append(allErrs, field.Required(field.NewPath("fieldManager"), "is required for apply patch")) - } - } - allErrs = append(allErrs, ValidateFieldManager(options.FieldManager, field.NewPath("fieldManager"))...) - allErrs = append(allErrs, ValidateDryRun(field.NewPath("dryRun"), options.DryRun)...) - return allErrs -} - -var FieldManagerMaxLength = 128 - -// ValidateFieldManager valides that the fieldManager is the proper length and -// only has printable characters. -func ValidateFieldManager(fieldManager string, fldPath *field.Path) field.ErrorList { - allErrs := field.ErrorList{} - // the field can not be set as a `*string`, so a empty string ("") is - // considered as not set and is defaulted by the rest of the process - // (unless apply is used, in which case it is required). - if len(fieldManager) > FieldManagerMaxLength { - allErrs = append(allErrs, field.TooLong(fldPath, fieldManager, FieldManagerMaxLength)) - } - // Verify that all characters are printable. - for i, r := range fieldManager { - if !unicode.IsPrint(r) { - allErrs = append(allErrs, field.Invalid(fldPath, fieldManager, fmt.Sprintf("invalid character %#U (at position %d)", r, i))) - } - } - - return allErrs -} - -var allowedDryRunValues = sets.NewString(metav1.DryRunAll) - -// ValidateDryRun validates that a dryRun query param only contains allowed values. -func ValidateDryRun(fldPath *field.Path, dryRun []string) field.ErrorList { - allErrs := field.ErrorList{} - if !allowedDryRunValues.HasAll(dryRun...) { - allErrs = append(allErrs, field.NotSupported(fldPath, dryRun, allowedDryRunValues.List())) - } - return allErrs -} - -const UninitializedStatusUpdateErrorMsg string = `must not update status when the object is uninitialized` - -// ValidateTableOptions returns any invalid flags on TableOptions. 
-func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList { - var allErrs field.ErrorList - switch opts.IncludeObject { - case metav1.IncludeMetadata, metav1.IncludeNone, metav1.IncludeObject, "": - default: - allErrs = append(allErrs, field.Invalid(field.NewPath("includeObject"), opts.IncludeObject, "must be 'Metadata', 'Object', 'None', or empty")) - } - return allErrs -} - -const MaxSubresourceNameLength = 256 - -func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - for i, fields := range fieldsList { - fldPath := fldPath.Index(i) - switch fields.Operation { - case metav1.ManagedFieldsOperationApply, metav1.ManagedFieldsOperationUpdate: - default: - allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`")) - } - if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`")) - } - allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...) - - if len(fields.Subresource) > MaxSubresourceNameLength { - allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength)) - } - } - return allErrs -} - -func ValidateConditions(conditions []metav1.Condition, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - conditionTypeToFirstIndex := map[string]int{} - for i, condition := range conditions { - if _, ok := conditionTypeToFirstIndex[condition.Type]; ok { - allErrs = append(allErrs, field.Duplicate(fldPath.Index(i).Child("type"), condition.Type)) - } else { - conditionTypeToFirstIndex[condition.Type] = i - } - - allErrs = append(allErrs, ValidateCondition(condition, fldPath.Index(i))...) - } - - return allErrs -} - -// validConditionStatuses is used internally to check validity and provide a good message -var validConditionStatuses = sets.NewString(string(metav1.ConditionTrue), string(metav1.ConditionFalse), string(metav1.ConditionUnknown)) - -const ( - maxReasonLen = 1 * 1024 - maxMessageLen = 32 * 1024 -) - -func ValidateCondition(condition metav1.Condition, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - // type is set and is a valid format - allErrs = append(allErrs, ValidateLabelName(condition.Type, fldPath.Child("type"))...) 
- - // status is set and is an accepted value - if !validConditionStatuses.Has(string(condition.Status)) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("status"), condition.Status, validConditionStatuses.List())) - } - - if condition.ObservedGeneration < 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Child("observedGeneration"), condition.ObservedGeneration, "must be greater than or equal to zero")) - } - - if condition.LastTransitionTime.IsZero() { - allErrs = append(allErrs, field.Required(fldPath.Child("lastTransitionTime"), "must be set")) - } - - if len(condition.Reason) == 0 { - allErrs = append(allErrs, field.Required(fldPath.Child("reason"), "must be set")) - } else { - for _, currErr := range isValidConditionReason(condition.Reason) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("reason"), condition.Reason, currErr)) - } - if len(condition.Reason) > maxReasonLen { - allErrs = append(allErrs, field.TooLong(fldPath.Child("reason"), condition.Reason, maxReasonLen)) - } - } - - if len(condition.Message) > maxMessageLen { - allErrs = append(allErrs, field.TooLong(fldPath.Child("message"), condition.Message, maxMessageLen)) - } - - return allErrs -} - -const conditionReasonFmt string = "[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?" -const conditionReasonErrMsg string = "a condition reason must start with alphabetic character, optionally followed by a string of alphanumeric characters or '_,:', and must end with an alphanumeric character or '_'" - -var conditionReasonRegexp = regexp.MustCompile("^" + conditionReasonFmt + "$") - -// isValidConditionReason tests for a string that conforms to rules for condition reasons. This checks the format, but not the length. -func isValidConditionReason(value string) []string { - if !conditionReasonRegexp.MatchString(value) { - return []string{validation.RegexError(conditionReasonErrMsg, conditionReasonFmt, "my_name", "MY_NAME", "MyName", "ReasonA,ReasonB", "ReasonA:ReasonB")} - } - return nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6e8fc9042..f1105caca 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,6 +17,7 @@ github.com/Azure/go-autorest/logger # github.com/Azure/go-autorest/tracing v0.6.0 github.com/Azure/go-autorest/tracing # github.com/MakeNowJust/heredoc v1.0.0 +## explicit github.com/MakeNowJust/heredoc # github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/purell @@ -164,16 +165,17 @@ github.com/modern-go/concurrent github.com/modern-go/reflect2 # github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 github.com/monochromegane/go-gitignore -# github.com/openshift/api v0.0.0-20210816181336-8ff39b776da3 +# github.com/openshift/api v0.0.0-20211103131146-a0807454a876 ## explicit github.com/openshift/api/config/v1 github.com/openshift/api/image/docker10 github.com/openshift/api/image/dockerpre012 github.com/openshift/api/image/v1 +github.com/openshift/api/machine/v1beta1 github.com/openshift/api/operator/v1 github.com/openshift/api/operator/v1alpha1 github.com/openshift/api/route/v1 -# github.com/openshift/client-go v0.0.0-20210730113412-1811c1b3fc0e +# github.com/openshift/client-go v0.0.0-20211025111749-96ca2abfc56c ## explicit github.com/openshift/client-go/config/clientset/versioned github.com/openshift/client-go/config/clientset/versioned/fake @@ -182,6 +184,8 @@ github.com/openshift/client-go/config/clientset/versioned/typed/config/v1 github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake 
github.com/openshift/client-go/image/clientset/versioned/scheme github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 +github.com/openshift/client-go/machine/clientset/versioned/scheme +github.com/openshift/client-go/machine/clientset/versioned/typed/machine/v1beta1 github.com/openshift/client-go/operator/clientset/versioned github.com/openshift/client-go/operator/clientset/versioned/fake github.com/openshift/client-go/operator/clientset/versioned/scheme @@ -191,21 +195,12 @@ github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alp github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1alpha1/fake github.com/openshift/client-go/route/clientset/versioned/scheme github.com/openshift/client-go/route/clientset/versioned/typed/route/v1 -# github.com/openshift/cluster-api-provider-gcp v0.0.1-0.20210615203611-a02074e8d5bb -github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1 # github.com/openshift/library-go v0.0.0-20210811133500-5e31383de2a7 ## explicit github.com/openshift/library-go/pkg/image/imageutil github.com/openshift/library-go/pkg/image/internal/digest github.com/openshift/library-go/pkg/image/internal/reference github.com/openshift/library-go/pkg/image/reference -# github.com/openshift/machine-api-operator v0.2.1-0.20210820103535-d50698c302f5 -## explicit -github.com/openshift/machine-api-operator/pkg/apis/machine -github.com/openshift/machine-api-operator/pkg/apis/machine/v1beta1 -github.com/openshift/machine-api-operator/pkg/apis/vsphereprovider/v1beta1 -github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/scheme -github.com/openshift/machine-api-operator/pkg/generated/clientset/versioned/typed/machine/v1beta1 # github.com/operator-framework/api v0.10.5 ## explicit github.com/operator-framework/api/pkg/operators/v2 @@ -249,6 +244,7 @@ github.com/prometheus/procfs/internal/util # github.com/russross/blackfriday v1.5.2 github.com/russross/blackfriday # github.com/spf13/cobra v1.2.1 +## explicit github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit @@ -337,6 +333,8 @@ golang.org/x/text/unicode/norm golang.org/x/text/width # golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac golang.org/x/time/rate +# golang.org/x/tools v0.1.5 +## explicit # gomodules.xyz/jsonpatch/v2 v2.2.0 gomodules.xyz/jsonpatch/v2 # google.golang.org/appengine v1.6.7 @@ -450,7 +448,6 @@ k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme -k8s.io/apimachinery/pkg/apis/meta/v1/validation k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams @@ -662,17 +659,12 @@ k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation # k8s.io/utils v0.0.0-20210802155522-efc7438f0176 +## explicit k8s.io/utils/buffer k8s.io/utils/exec k8s.io/utils/integer k8s.io/utils/pointer k8s.io/utils/trace -# sigs.k8s.io/cluster-api-provider-aws v0.0.0-00010101000000-000000000000 => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210622023641-c69a3acaee27 -## explicit -sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1 -# sigs.k8s.io/cluster-api-provider-azure v0.0.0-00010101000000-000000000000 => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210816141152-a7c40345b994 -## explicit 
-sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1 # sigs.k8s.io/controller-runtime v0.9.6 ## explicit sigs.k8s.io/controller-runtime @@ -799,5 +791,3 @@ sigs.k8s.io/structured-merge-diff/v4/typed sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.2.0 sigs.k8s.io/yaml -# sigs.k8s.io/cluster-api-provider-aws => github.com/openshift/cluster-api-provider-aws v0.2.1-0.20210622023641-c69a3acaee27 -# sigs.k8s.io/cluster-api-provider-azure => github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210816141152-a7c40345b994 diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/LICENSE b/vendor/sigs.k8s.io/cluster-api-provider-aws/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-aws/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/awsproviderstatus_types.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/awsproviderstatus_types.go deleted file mode 100644 index 84f40cee6..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/awsproviderstatus_types.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. -// It contains AWS-specific status information. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type AWSMachineProviderStatus struct { - metav1.TypeMeta `json:",inline"` - - // InstanceID is the instance ID of the machine created in AWS - // +optional - InstanceID *string `json:"instanceId,omitempty"` - - // InstanceState is the state of the AWS instance for this machine - // +optional - InstanceState *string `json:"instanceState,omitempty"` - - // Conditions is a set of conditions associated with the Machine to indicate - // errors or other status - Conditions []AWSMachineProviderCondition `json:"conditions,omitempty"` -} - -// AWSMachineProviderConditionType is a valid value for AWSMachineProviderCondition.Type -type AWSMachineProviderConditionType string - -// Valid conditions for an AWS machine instance. -const ( - // MachineCreation indicates whether the machine has been created or not. If not, - // it should include a reason and message for the failure. - MachineCreation AWSMachineProviderConditionType = "MachineCreation" -) - -// AWSMachineProviderConditionReason is reason for the condition's last transition. 
-type AWSMachineProviderConditionReason string - -const ( - // MachineCreationSucceeded indicates machine creation success. - MachineCreationSucceeded AWSMachineProviderConditionReason = "MachineCreationSucceeded" - // MachineCreationFailed indicates machine creation failure. - MachineCreationFailed AWSMachineProviderConditionReason = "MachineCreationFailed" -) - -// AWSMachineProviderCondition is a condition in a AWSMachineProviderStatus. -type AWSMachineProviderCondition struct { - // Type is the type of the condition. - Type AWSMachineProviderConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason AWSMachineProviderConditionReason `json:"reason,omitempty"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/doc.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/doc.go deleted file mode 100644 index b23498c73..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1beta1 contains API Schema definitions for the awsproviderconfig v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig -// +k8s:defaulter-gen=TypeMeta -// +groupName=awsproviderconfig.openshift.io -package v1beta1 diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/register.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/register.go deleted file mode 100644 index 67107f8aa..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/register.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// NOTE: Boilerplate only. 
Ignore this file. - -// Package v1beta1 contains API Schema definitions for the awsproviderconfig v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsproviderconfig -// +k8s:defaulter-gen=TypeMeta -// +groupName=awsproviderconfig.k8s.io -package v1beta1 - -import ( - "encoding/json" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog/v2" - "sigs.k8s.io/controller-runtime/pkg/scheme" - "sigs.k8s.io/yaml" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "awsproviderconfig.openshift.io", Version: "v1beta1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) - -// RawExtensionFromProviderSpec marshals the machine provider spec. -func RawExtensionFromProviderSpec(spec *AWSMachineProviderConfig) (*runtime.RawExtension, error) { - if spec == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - if rawBytes, err = json.Marshal(spec); err != nil { - return nil, fmt.Errorf("error marshalling providerSpec: %v", err) - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// RawExtensionFromProviderStatus marshals the machine provider status -func RawExtensionFromProviderStatus(status *AWSMachineProviderStatus) (*runtime.RawExtension, error) { - if status == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - if rawBytes, err = json.Marshal(status); err != nil { - return nil, fmt.Errorf("error marshalling providerStatus: %v", err) - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// ProviderSpecFromRawExtension unmarshals a raw extension into an AWSMachineProviderSpec type -func ProviderSpecFromRawExtension(rawExtension *runtime.RawExtension) (*AWSMachineProviderConfig, error) { - if rawExtension == nil { - return &AWSMachineProviderConfig{}, nil - } - - spec := new(AWSMachineProviderConfig) - if err := yaml.Unmarshal(rawExtension.Raw, &spec); err != nil { - return nil, fmt.Errorf("error unmarshalling providerSpec: %v", err) - } - - klog.V(5).Infof("Got provider Spec from raw extension: %+v", spec) - return spec, nil -} - -// ProviderStatusFromRawExtension unmarshals a raw extension into an AWSMachineProviderStatus type -func ProviderStatusFromRawExtension(rawExtension *runtime.RawExtension) (*AWSMachineProviderStatus, error) { - if rawExtension == nil { - return &AWSMachineProviderStatus{}, nil - } - - providerStatus := new(AWSMachineProviderStatus) - if err := yaml.Unmarshal(rawExtension.Raw, providerStatus); err != nil { - return nil, fmt.Errorf("error unmarshalling providerStatus: %v", err) - } - - klog.V(5).Infof("Got provider Status from raw extension: %+v", providerStatus) - return providerStatus, nil -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index ff15c74e6..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-aws/pkg/apis/awsprovider/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,391 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineProviderCondition) DeepCopyInto(out *AWSMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderCondition. -func (in *AWSMachineProviderCondition) DeepCopy() *AWSMachineProviderCondition { - if in == nil { - return nil - } - out := new(AWSMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.AMI.DeepCopyInto(&out.AMI) - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]TagSpecification, len(*in)) - copy(*out, *in) - } - if in.IAMInstanceProfile != nil { - in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile - *out = new(AWSResourceReference) - (*in).DeepCopyInto(*out) - } - if in.UserDataSecret != nil { - in, out := &in.UserDataSecret, &out.UserDataSecret - *out = new(v1.LocalObjectReference) - **out = **in - } - if in.CredentialsSecret != nil { - in, out := &in.CredentialsSecret, &out.CredentialsSecret - *out = new(v1.LocalObjectReference) - **out = **in - } - if in.KeyName != nil { - in, out := &in.KeyName, &out.KeyName - *out = new(string) - **out = **in - } - if in.PublicIP != nil { - in, out := &in.PublicIP, &out.PublicIP - *out = new(bool) - **out = **in - } - if in.SecurityGroups != nil { - in, out := &in.SecurityGroups, &out.SecurityGroups - *out = make([]AWSResourceReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Subnet.DeepCopyInto(&out.Subnet) - out.Placement = in.Placement - if in.LoadBalancers != nil { - in, out := &in.LoadBalancers, &out.LoadBalancers - *out = make([]LoadBalancerReference, len(*in)) - copy(*out, *in) - } - if in.BlockDevices != nil { - in, out := &in.BlockDevices, &out.BlockDevices - *out = make([]BlockDeviceMappingSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SpotMarketOptions != nil { - in, out := &in.SpotMarketOptions, &out.SpotMarketOptions - *out = new(SpotMarketOptions) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfig. 
-func (in *AWSMachineProviderConfig) DeepCopy() *AWSMachineProviderConfig { - if in == nil { - return nil - } - out := new(AWSMachineProviderConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineProviderConfig) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineProviderConfigList) DeepCopyInto(out *AWSMachineProviderConfigList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AWSMachineProviderConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfigList. -func (in *AWSMachineProviderConfigList) DeepCopy() *AWSMachineProviderConfigList { - if in == nil { - return nil - } - out := new(AWSMachineProviderConfigList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineProviderConfigList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.InstanceID != nil { - in, out := &in.InstanceID, &out.InstanceID - *out = new(string) - **out = **in - } - if in.InstanceState != nil { - in, out := &in.InstanceState, &out.InstanceState - *out = new(string) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]AWSMachineProviderCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderStatus. -func (in *AWSMachineProviderStatus) DeepCopy() *AWSMachineProviderStatus { - if in == nil { - return nil - } - out := new(AWSMachineProviderStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AWSMachineProviderStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { - *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - if in.ARN != nil { - in, out := &in.ARN, &out.ARN - *out = new(string) - **out = **in - } - if in.Filters != nil { - in, out := &in.Filters, &out.Filters - *out = make([]Filter, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. 
-func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { - if in == nil { - return nil - } - out := new(AWSResourceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BlockDeviceMappingSpec) DeepCopyInto(out *BlockDeviceMappingSpec) { - *out = *in - if in.DeviceName != nil { - in, out := &in.DeviceName, &out.DeviceName - *out = new(string) - **out = **in - } - if in.EBS != nil { - in, out := &in.EBS, &out.EBS - *out = new(EBSBlockDeviceSpec) - (*in).DeepCopyInto(*out) - } - if in.NoDevice != nil { - in, out := &in.NoDevice, &out.NoDevice - *out = new(string) - **out = **in - } - if in.VirtualName != nil { - in, out := &in.VirtualName, &out.VirtualName - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingSpec. -func (in *BlockDeviceMappingSpec) DeepCopy() *BlockDeviceMappingSpec { - if in == nil { - return nil - } - out := new(BlockDeviceMappingSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { - *out = *in - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination - *out = new(bool) - **out = **in - } - if in.Encrypted != nil { - in, out := &in.Encrypted, &out.Encrypted - *out = new(bool) - **out = **in - } - in.KMSKey.DeepCopyInto(&out.KMSKey) - if in.Iops != nil { - in, out := &in.Iops, &out.Iops - *out = new(int64) - **out = **in - } - if in.VolumeSize != nil { - in, out := &in.VolumeSize, &out.VolumeSize - *out = new(int64) - **out = **in - } - if in.VolumeType != nil { - in, out := &in.VolumeType, &out.VolumeType - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceSpec. -func (in *EBSBlockDeviceSpec) DeepCopy() *EBSBlockDeviceSpec { - if in == nil { - return nil - } - out := new(EBSBlockDeviceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Filter) DeepCopyInto(out *Filter) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. -func (in *Filter) DeepCopy() *Filter { - if in == nil { - return nil - } - out := new(Filter) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference. -func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference { - if in == nil { - return nil - } - out := new(LoadBalancerReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Placement) DeepCopyInto(out *Placement) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. 
-func (in *Placement) DeepCopy() *Placement { - if in == nil { - return nil - } - out := new(Placement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) { - *out = *in - if in.MaxPrice != nil { - in, out := &in.MaxPrice, &out.MaxPrice - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions. -func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions { - if in == nil { - return nil - } - out := new(SpotMarketOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TagSpecification) DeepCopyInto(out *TagSpecification) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecification. -func (in *TagSpecification) DeepCopy() *TagSpecification { - if in == nil { - return nil - } - out := new(TagSpecification) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/LICENSE b/vendor/sigs.k8s.io/cluster-api-provider-azure/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/azuremachineproviderconfig_types.go b/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/azuremachineproviderconfig_types.go deleted file mode 100644 index 8e4d10394..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/azuremachineproviderconfig_types.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Annotation constants -const ( - // ClusterIDLabel is the label that a machineset must have to identify the - // cluster to which it belongs. - ClusterIDLabel = "machine.openshift.io/cluster-api-cluster" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field -// for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. -// Required parameters such as location that are not specified by this configuration, will be defaulted -// by the actuator. -// TODO: Update type -// +k8s:openapi-gen=true -type AzureMachineProviderSpec struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // UserDataSecret contains a local reference to a secret that contains the - // UserData to apply to the instance - UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` - - // CredentialsSecret is a reference to the secret with Azure credentials. - CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"` - - Location string `json:"location,omitempty"` - VMSize string `json:"vmSize,omitempty"` - Image Image `json:"image"` - OSDisk OSDisk `json:"osDisk"` - SSHPublicKey string `json:"sshPublicKey,omitempty"` - PublicIP bool `json:"publicIP"` - Tags map[string]string `json:"tags,omitempty"` - - // Network Security Group that needs to be attached to the machine's interface. - // No security group will be attached if empty. - SecurityGroup string `json:"securityGroup,omitempty"` - - // Application Security Groups that need to be attached to the machine's interface. - // No application security groups will be attached if zero-length. - ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"` - - // Subnet to use for this instance - Subnet string `json:"subnet"` - - // PublicLoadBalancer to use for this instance - PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"` - - // InternalLoadBalancerName to use for this instance - InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"` - - // NatRule to set inbound NAT rule of the load balancer - NatRule *int `json:"natRule,omitempty"` - - // ManagedIdentity to set managed identity name - ManagedIdentity string `json:"managedIdentity,omitempty"` - - // Vnet to set virtual network name - Vnet string `json:"vnet,omitempty"` - - // Availability Zone for the virtual machine. - // If nil, the virtual machine should be deployed to no zone - Zone *string `json:"zone,omitempty"` - - NetworkResourceGroup string `json:"networkResourceGroup,omitempty"` - ResourceGroup string `json:"resourceGroup,omitempty"` - - // SpotVMOptions allows the ability to specify the Machine should use a Spot VM - SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"` - - // SecurityProfile specifies the Security profile settings for a virtual machine. 
- // +optional - SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` -} - -// SpotVMOptions defines the options relevant to running the Machine on Spot VMs -type SpotVMOptions struct { - // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances - MaxPrice *resource.Quantity `json:"maxPrice,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -func init() { - SchemeBuilder.Register(&AzureMachineProviderSpec{}) -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/azuremachineproviderstatus_types.go b/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/azuremachineproviderstatus_types.go deleted file mode 100644 index 00eac86c5..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/azuremachineproviderstatus_types.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. -// It contains Azure-specific status information. -// +k8s:openapi-gen=true -type AzureMachineProviderStatus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - // VMID is the ID of the virtual machine created in Azure. - // +optional - VMID *string `json:"vmId,omitempty"` - - // VMState is the provisioning state of the Azure virtual machine. - // +optional - VMState *VMState `json:"vmState,omitempty"` - - // Conditions is a set of conditions associated with the Machine to indicate - // errors or other status. - // +optional - Conditions []AzureMachineProviderCondition `json:"conditions,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -func init() { - SchemeBuilder.Register(&AzureMachineProviderStatus{}) -} - -// SubnetSpec configures an Azure subnet. -type SubnetSpec struct { - // ID defines a unique identifier to reference this resource. - ID string `json:"id,omitempty"` - - // Name defines a name for the subnet resource. - Name string `json:"name"` - - // VnetID defines the ID of the virtual network this subnet should be built in. - VnetID string `json:"vnetId"` - - // CidrBlock is the CIDR block to be used when the provider creates a managed Vnet. - CidrBlock string `json:"cidrBlock,omitempty"` - - // SecurityGroup defines the NSG (network security group) that should be attached to this subnet. - SecurityGroup SecurityGroup `json:"securityGroup"` - - // Tags is a collection of tags describing the resource. - // TODO: Uncomment once tagging is implemented. 
- //Tags tags.Map `json:"tags,omitempty"` -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/doc.go b/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/doc.go deleted file mode 100644 index 45bb25201..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1beta1 contains API Schema definitions for the azureprovider v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider -// +k8s:defaulter-gen=TypeMeta -// +groupName=azureproviderconfig.openshift.io -package v1beta1 diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/register.go b/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/register.go deleted file mode 100644 index c7d4921dd..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/register.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// NOTE: Boilerplate only. Ignore this file. 
- -// Package v1beta1 contains API Schema definitions for the azureprovider v1beta1 API group -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider -// +k8s:defaulter-gen=TypeMeta -// +groupName=azureproviderconfig.openshift.io -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/json" - "sigs.k8s.io/controller-runtime/pkg/scheme" - "sigs.k8s.io/yaml" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "azureproviderconfig.openshift.io", Version: "v1beta1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) - -// MachineStatusFromProviderStatus unmarshals a raw extension into an Azure machine type -func MachineStatusFromProviderStatus(extension *runtime.RawExtension) (*AzureMachineProviderStatus, error) { - if extension == nil { - return &AzureMachineProviderStatus{}, nil - } - - status := new(AzureMachineProviderStatus) - if err := yaml.Unmarshal(extension.Raw, status); err != nil { - return nil, err - } - - return status, nil -} - -// EncodeMachineStatus marshals the machine status -func EncodeMachineStatus(status *AzureMachineProviderStatus) (*runtime.RawExtension, error) { - if status == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - - // TODO: use apimachinery conversion https://godoc.org/k8s.io/apimachinery/pkg/runtime#Convert_runtime_Object_To_runtime_RawExtension - if rawBytes, err = json.Marshal(status); err != nil { - return nil, err - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} - -// EncodeMachineSpec marshals the machine provider spec. -func EncodeMachineSpec(spec *AzureMachineProviderSpec) (*runtime.RawExtension, error) { - if spec == nil { - return &runtime.RawExtension{}, nil - } - - var rawBytes []byte - var err error - - // TODO: use apimachinery conversion https://godoc.org/k8s.io/apimachinery/pkg/runtime#Convert_runtime_Object_To_runtime_RawExtension - if rawBytes, err = json.Marshal(spec); err != nil { - return nil, err - } - - return &runtime.RawExtension{ - Raw: rawBytes, - }, nil -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/types.go b/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/types.go deleted file mode 100644 index b59fe1a2b..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/types.go +++ /dev/null @@ -1,427 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// TODO: Write type tests - -// AzureResourceReference is a reference to a specific Azure resource by ID -type AzureResourceReference struct { - // ID of resource - // +optional - ID *string `json:"id,omitempty"` - // TODO: Investigate if we should reference resources in other ways -} - -// TODO: Investigate resource filters - -// AzureMachineProviderConditionType is a valid value for AzureMachineProviderCondition.Type -type AzureMachineProviderConditionType string - -// Valid conditions for an Azure machine instance -const ( - // MachineCreated indicates whether the machine has been created or not. If not, - // it should include a reason and message for the failure. - MachineCreated AzureMachineProviderConditionType = "MachineCreated" -) - -// AzureMachineProviderCondition is a condition in a AzureMachineProviderStatus -type AzureMachineProviderCondition struct { - // Type is the type of the condition. - Type AzureMachineProviderConditionType `json:"type"` - // Status is the status of the condition. - Status corev1.ConditionStatus `json:"status"` - // LastProbeTime is the last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime"` - // LastTransitionTime is the last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime"` - // Reason is a unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason"` - // Message is a human-readable message indicating details about last transition. - // +optional - Message string `json:"message"` -} - -const ( - // ControlPlane machine label - ControlPlane string = "master" - // Node machine label - Node string = "worker" - // MachineRoleLabel machine label to determine the role - MachineRoleLabel = "machine.openshift.io/cluster-api-machine-role" -) - -// Network encapsulates Azure networking resources. -type Network struct { - // SecurityGroups is a map from the role/kind of the security group to its unique name, if any. - SecurityGroups map[SecurityGroupRole]*SecurityGroup `json:"securityGroups,omitempty"` - - // APIServerLB is the Kubernetes API server load balancer. - APIServerLB LoadBalancer `json:"apiServerLb,omitempty"` - - // APIServerIP is the Kubernetes API server public IP address. - APIServerIP PublicIP `json:"apiServerIp,omitempty"` -} - -// TODO: Implement tagging -/* -// Tags defines resource tags. -type Tags map[string]*string -*/ - -// Subnets is a slice of Subnet. -type Subnets []*SubnetSpec - -// TODO -// ToMap returns a map from id to subnet. -func (s Subnets) ToMap() map[string]*SubnetSpec { - res := make(map[string]*SubnetSpec) - for _, x := range s { - res[x.ID] = x - } - return res -} - -// SecurityGroupRole defines the unique role of a security group. -type SecurityGroupRole string - -var ( - // SecurityGroupBastion defines an SSH bastion role - SecurityGroupBastion = SecurityGroupRole("bastion") - - // SecurityGroupNode defines a Kubernetes workload node role - SecurityGroupNode = SecurityGroupRole(Node) - - // SecurityGroupControlPlane defines a Kubernetes control plane node role - SecurityGroupControlPlane = SecurityGroupRole(ControlPlane) -) - -// SecurityGroup defines an Azure security group. 
-type SecurityGroup struct { - ID string `json:"id"` - Name string `json:"name"` - IngressRules IngressRules `json:"ingressRule"` - // TODO: Uncomment once tagging is implemented. - //Tags *Tags `json:"tags"` -} - -/* -// TODO -// String returns a string representation of the security group. -func (s *SecurityGroup) String() string { - return fmt.Sprintf("id=%s/name=%s", s.ID, s.Name) -} -*/ - -// SecurityGroupProtocol defines the protocol type for a security group rule. -type SecurityGroupProtocol string - -var ( - // SecurityGroupProtocolAll is a wildcard for all IP protocols - SecurityGroupProtocolAll = SecurityGroupProtocol("*") - - // SecurityGroupProtocolTCP represents the TCP protocol in ingress rules - SecurityGroupProtocolTCP = SecurityGroupProtocol("Tcp") - - // SecurityGroupProtocolUDP represents the UDP protocol in ingress rules - SecurityGroupProtocolUDP = SecurityGroupProtocol("Udp") -) - -// TODO -// IngressRule defines an Azure ingress rule for security groups. -type IngressRule struct { - Description string `json:"description"` - Protocol SecurityGroupProtocol `json:"protocol"` - - // SourcePorts - The source port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. - SourcePorts *string `json:"sourcePorts,omitempty"` - - // DestinationPorts - The destination port or range. Integer or range between 0 and 65535. Asterix '*' can also be used to match all ports. - DestinationPorts *string `json:"destinationPorts,omitempty"` - - // Source - The CIDR or source IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from. - Source *string `json:"source,omitempty"` - - // Destination - The destination address prefix. CIDR or destination IP range. Asterix '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. - Destination *string `json:"destination,omitempty"` -} - -// TODO -// String returns a string representation of the ingress rule. -/* -func (i *IngressRule) String() string { - return fmt.Sprintf("protocol=%s/range=[%d-%d]/description=%s", i.Protocol, i.FromPort, i.ToPort, i.Description) -} -*/ - -// TODO -// IngressRules is a slice of Azure ingress rules for security groups. -type IngressRules []*IngressRule - -// TODO -// Difference returns the difference between this slice and the other slice. -/* -func (i IngressRules) Difference(o IngressRules) (out IngressRules) { - for _, x := range i { - found := false - for _, y := range o { - sort.Strings(x.CidrBlocks) - sort.Strings(y.CidrBlocks) - sort.Strings(x.SourceSecurityGroupIDs) - sort.Strings(y.SourceSecurityGroupIDs) - if reflect.DeepEqual(x, y) { - found = true - break - } - } - - if !found { - out = append(out, x) - } - } - - return -} -*/ - -// PublicIP defines an Azure public IP address. -// TODO: Remove once load balancer is implemented. -type PublicIP struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - IPAddress string `json:"ipAddress,omitempty"` - DNSName string `json:"dnsName,omitempty"` -} - -// TODO -// LoadBalancer defines an Azure load balancer. 
-type LoadBalancer struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - SKU SKU `json:"sku,omitempty"` - FrontendIPConfig FrontendIPConfig `json:"frontendIpConfig,omitempty"` - BackendPool BackendPool `json:"backendPool,omitempty"` - // TODO: Uncomment once tagging is implemented. - //Tags Tags `json:"tags,omitempty"` - /* - // FrontendIPConfigurations - Object representing the frontend IPs to be used for the load balancer - FrontendIPConfigurations *[]FrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` - // BackendAddressPools - Collection of backend address pools used by a load balancer - BackendAddressPools *[]BackendAddressPool `json:"backendAddressPools,omitempty"` - // LoadBalancingRules - Object collection representing the load balancing rules Gets the provisioning - LoadBalancingRules *[]LoadBalancingRule `json:"loadBalancingRules,omitempty"` - // Probes - Collection of probe objects used in the load balancer - Probes *[]Probe `json:"probes,omitempty"` - // InboundNatRules - Collection of inbound NAT Rules used by a load balancer. Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool. They have to reference individual inbound NAT rules. - InboundNatRules *[]InboundNatRule `json:"inboundNatRules,omitempty"` - // InboundNatPools - Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules. Inbound NAT pools are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an inbound NAT pool. They have to reference individual inbound NAT rules. - InboundNatPools *[]InboundNatPool `json:"inboundNatPools,omitempty"` - // OutboundRules - The outbound rules. - OutboundRules *[]OutboundRule `json:"outboundRules,omitempty"` - // ResourceGUID - The resource GUID property of the load balancer resource. - ResourceGUID *string `json:"resourceGuid,omitempty"` - // ProvisioningState - Gets the provisioning state of the PublicIP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. - ProvisioningState *string `json:"provisioningState,omitempty"` - */ -} - -// LoadBalancerSKU enumerates the values for load balancer sku name. -type SKU string - -var ( - SKUBasic = SKU("Basic") - SKUStandard = SKU("Standard") -) - -type FrontendIPConfig struct { - /* - // FrontendIPConfigurationPropertiesFormat - Properties of the load balancer probe. - *FrontendIPConfigurationPropertiesFormat `json:"properties,omitempty"` - // Name - The name of the resource that is unique within a resource group. This name can be used to access the resource. - Name *string `json:"name,omitempty"` - // Etag - A unique read-only string that changes whenever the resource is updated. - Etag *string `json:"etag,omitempty"` - // Zones - A list of availability zones denoting the IP allocated for the resource needs to come from. - Zones *[]string `json:"zones,omitempty"` - // ID - Resource ID. 
- ID *string `json:"id,omitempty"` - */ -} - -type BackendPool struct { - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` -} - -// TODO -// LoadBalancerProtocol defines listener protocols for a load balancer. -type LoadBalancerProtocol string - -// TODO -var ( - // LoadBalancerProtocolTCP defines the LB API string representing the TCP protocol - LoadBalancerProtocolTCP = LoadBalancerProtocol("TCP") - - // LoadBalancerProtocolSSL defines the LB API string representing the TLS protocol - LoadBalancerProtocolSSL = LoadBalancerProtocol("SSL") - - // LoadBalancerProtocolHTTP defines the LB API string representing the HTTP protocol at L7 - LoadBalancerProtocolHTTP = LoadBalancerProtocol("HTTP") - - // LoadBalancerProtocolHTTPS defines the LB API string representing the HTTP protocol at L7 - LoadBalancerProtocolHTTPS = LoadBalancerProtocol("HTTPS") -) - -// TODO -// LoadBalancerListener defines an Azure load balancer listener. -type LoadBalancerListener struct { - Protocol LoadBalancerProtocol `json:"protocol"` - Port int64 `json:"port"` - InstanceProtocol LoadBalancerProtocol `json:"instanceProtocol"` - InstancePort int64 `json:"instancePort"` -} - -// TODO -// LoadBalancerHealthCheck defines an Azure load balancer health check. -type LoadBalancerHealthCheck struct { - Target string `json:"target"` - Interval time.Duration `json:"interval"` - Timeout time.Duration `json:"timeout"` - HealthyThreshold int64 `json:"healthyThreshold"` - UnhealthyThreshold int64 `json:"unhealthyThreshold"` -} - -// VMState describes the state of an Azure virtual machine. -type VMState string - -var ( - // ProvisioningState related values - // VMStateCreating ... - VMStateCreating = VMState("Creating") - // VMStateDeleting ... - VMStateDeleting = VMState("Deleting") - // VMStateFailed ... - VMStateFailed = VMState("Failed") - // VMStateMigrating ... - VMStateMigrating = VMState("Migrating") - // VMStateSucceeded ... - VMStateSucceeded = VMState("Succeeded") - // VMStateUpdating ... - VMStateUpdating = VMState("Updating") - - // PowerState related values - // VMStateStarting ... - VMStateStarting = VMState("Starting") - // VMStateRunning ... - VMStateRunning = VMState("Running") - // VMStateStopping ... - VMStateStopping = VMState("Stopping") - // VMStateStopped ... - VMStateStopped = VMState("Stopped") - // VMStateDeallocating ... - VMStateDeallocating = VMState("Deallocating") - // VMStateDeallocated ... - VMStateDeallocated = VMState("Deallocated") - - // VMStateUnknown ... - VMStateUnknown = VMState("Unknown") -) - -// VM describes an Azure virtual machine. -type VM struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - - // Hardware profile - VMSize string `json:"vmSize,omitempty"` - - // Storage profile - Image Image `json:"image,omitempty"` - OSDisk OSDisk `json:"osDisk,omitempty"` - - StartupScript string `json:"startupScript,omitempty"` - - // State - The provisioning state, which only appears in the response. - State VMState `json:"vmState,omitempty"` - Identity VMIdentity `json:"identity,omitempty"` - - // TODO: Uncomment once tagging is implemented. - //Tags Tags `json:"tags,omitempty"` - - // HardwareProfile - Specifies the hardware settings for the virtual machine. - //HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` - - // StorageProfile - Specifies the storage settings for the virtual machine disks. 
- //StorageProfile *StorageProfile `json:"storageProfile,omitempty"` - - // AdditionalCapabilities - Specifies additional capabilities enabled or disabled on the virtual machine. - //AdditionalCapabilities *AdditionalCapabilities `json:"additionalCapabilities,omitempty"` - - // OsProfile - Specifies the operating system settings for the virtual machine. - //OsProfile *OSProfile `json:"osProfile,omitempty"` - // NetworkProfile - Specifies the network interfaces of the virtual machine. - //NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` - - //AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` -} - -// Image is a mirror of azure sdk compute.ImageReference -type Image struct { - // Fields below refer to os images in marketplace - Publisher string `json:"publisher"` - Offer string `json:"offer"` - SKU string `json:"sku"` - Version string `json:"version"` - // ResourceID represents the location of OS Image in azure subscription - ResourceID string `json:"resourceID"` -} - -// VMIdentity defines the identity of the virtual machine, if configured. -type VMIdentity string - -type OSDisk struct { - OSType string `json:"osType"` - ManagedDisk ManagedDiskParameters `json:"managedDisk"` - DiskSizeGB int32 `json:"diskSizeGB"` -} - -type ManagedDiskParameters struct { - StorageAccountType string `json:"storageAccountType"` - DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"` -} - -type DiskEncryptionSetParameters struct { - ID string `json:"id,omitempty"` -} - -// SecurityProfile specifies the Security profile settings for a -// virtual machine or virtual machine scale set. -type SecurityProfile struct { - // This field indicates whether Host Encryption should be enabled - // or disabled for a virtual machine or virtual machine scale - // set. Default is disabled. - EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"` -} diff --git a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index f42d493a0..000000000 --- a/vendor/sigs.k8s.io/cluster-api-provider-azure/pkg/apis/azureprovider/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,571 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by main. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineProviderCondition) DeepCopyInto(out *AzureMachineProviderCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderCondition. 
-func (in *AzureMachineProviderCondition) DeepCopy() *AzureMachineProviderCondition { - if in == nil { - return nil - } - out := new(AzureMachineProviderCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineProviderSpec) DeepCopyInto(out *AzureMachineProviderSpec) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.UserDataSecret != nil { - in, out := &in.UserDataSecret, &out.UserDataSecret - *out = new(v1.SecretReference) - **out = **in - } - if in.CredentialsSecret != nil { - in, out := &in.CredentialsSecret, &out.CredentialsSecret - *out = new(v1.SecretReference) - **out = **in - } - out.Image = in.Image - in.OSDisk.DeepCopyInto(&out.OSDisk) - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ApplicationSecurityGroups != nil { - in, out := &in.ApplicationSecurityGroups, &out.ApplicationSecurityGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NatRule != nil { - in, out := &in.NatRule, &out.NatRule - *out = new(int) - **out = **in - } - if in.Zone != nil { - in, out := &in.Zone, &out.Zone - *out = new(string) - **out = **in - } - if in.SpotVMOptions != nil { - in, out := &in.SpotVMOptions, &out.SpotVMOptions - *out = new(SpotVMOptions) - (*in).DeepCopyInto(*out) - } - if in.SecurityProfile != nil { - in, out := &in.SecurityProfile, &out.SecurityProfile - *out = new(SecurityProfile) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderSpec. -func (in *AzureMachineProviderSpec) DeepCopy() *AzureMachineProviderSpec { - if in == nil { - return nil - } - out := new(AzureMachineProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AzureMachineProviderSpec) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureMachineProviderStatus) DeepCopyInto(out *AzureMachineProviderStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.VMID != nil { - in, out := &in.VMID, &out.VMID - *out = new(string) - **out = **in - } - if in.VMState != nil { - in, out := &in.VMState, &out.VMState - *out = new(VMState) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]AzureMachineProviderCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderStatus. -func (in *AzureMachineProviderStatus) DeepCopy() *AzureMachineProviderStatus { - if in == nil { - return nil - } - out := new(AzureMachineProviderStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *AzureMachineProviderStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureResourceReference) DeepCopyInto(out *AzureResourceReference) { - *out = *in - if in.ID != nil { - in, out := &in.ID, &out.ID - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceReference. -func (in *AzureResourceReference) DeepCopy() *AzureResourceReference { - if in == nil { - return nil - } - out := new(AzureResourceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BackendPool) DeepCopyInto(out *BackendPool) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackendPool. -func (in *BackendPool) DeepCopy() *BackendPool { - if in == nil { - return nil - } - out := new(BackendPool) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetParameters. -func (in *DiskEncryptionSetParameters) DeepCopy() *DiskEncryptionSetParameters { - if in == nil { - return nil - } - out := new(DiskEncryptionSetParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FrontendIPConfig) DeepCopyInto(out *FrontendIPConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontendIPConfig. -func (in *FrontendIPConfig) DeepCopy() *FrontendIPConfig { - if in == nil { - return nil - } - out := new(FrontendIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { - if in == nil { - return nil - } - out := new(Image) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressRule) DeepCopyInto(out *IngressRule) { - *out = *in - if in.SourcePorts != nil { - in, out := &in.SourcePorts, &out.SourcePorts - *out = new(string) - **out = **in - } - if in.DestinationPorts != nil { - in, out := &in.DestinationPorts, &out.DestinationPorts - *out = new(string) - **out = **in - } - if in.Source != nil { - in, out := &in.Source, &out.Source - *out = new(string) - **out = **in - } - if in.Destination != nil { - in, out := &in.Destination, &out.Destination - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. 
-func (in *IngressRule) DeepCopy() *IngressRule { - if in == nil { - return nil - } - out := new(IngressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in IngressRules) DeepCopyInto(out *IngressRules) { - { - in := &in - *out = make(IngressRules, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(IngressRule) - (*in).DeepCopyInto(*out) - } - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRules. -func (in IngressRules) DeepCopy() IngressRules { - if in == nil { - return nil - } - out := new(IngressRules) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) { - *out = *in - out.FrontendIPConfig = in.FrontendIPConfig - out.BackendPool = in.BackendPool - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer. -func (in *LoadBalancer) DeepCopy() *LoadBalancer { - if in == nil { - return nil - } - out := new(LoadBalancer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerHealthCheck) DeepCopyInto(out *LoadBalancerHealthCheck) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerHealthCheck. -func (in *LoadBalancerHealthCheck) DeepCopy() *LoadBalancerHealthCheck { - if in == nil { - return nil - } - out := new(LoadBalancerHealthCheck) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadBalancerListener) DeepCopyInto(out *LoadBalancerListener) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerListener. -func (in *LoadBalancerListener) DeepCopy() *LoadBalancerListener { - if in == nil { - return nil - } - out := new(LoadBalancerListener) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedDiskParameters) DeepCopyInto(out *ManagedDiskParameters) { - *out = *in - if in.DiskEncryptionSet != nil { - in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet - *out = new(DiskEncryptionSetParameters) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDiskParameters. -func (in *ManagedDiskParameters) DeepCopy() *ManagedDiskParameters { - if in == nil { - return nil - } - out := new(ManagedDiskParameters) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Network) DeepCopyInto(out *Network) { - *out = *in - if in.SecurityGroups != nil { - in, out := &in.SecurityGroups, &out.SecurityGroups - *out = make(map[SecurityGroupRole]*SecurityGroup, len(*in)) - for key, val := range *in { - var outVal *SecurityGroup - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = new(SecurityGroup) - (*in).DeepCopyInto(*out) - } - (*out)[key] = outVal - } - } - out.APIServerLB = in.APIServerLB - out.APIServerIP = in.APIServerIP - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. -func (in *Network) DeepCopy() *Network { - if in == nil { - return nil - } - out := new(Network) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OSDisk) DeepCopyInto(out *OSDisk) { - *out = *in - in.ManagedDisk.DeepCopyInto(&out.ManagedDisk) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk. -func (in *OSDisk) DeepCopy() *OSDisk { - if in == nil { - return nil - } - out := new(OSDisk) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PublicIP) DeepCopyInto(out *PublicIP) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIP. -func (in *PublicIP) DeepCopy() *PublicIP { - if in == nil { - return nil - } - out := new(PublicIP) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) { - *out = *in - if in.IngressRules != nil { - in, out := &in.IngressRules, &out.IngressRules - *out = make(IngressRules, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(IngressRule) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup. -func (in *SecurityGroup) DeepCopy() *SecurityGroup { - if in == nil { - return nil - } - out := new(SecurityGroup) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) { - *out = *in - if in.EncryptionAtHost != nil { - in, out := &in.EncryptionAtHost, &out.EncryptionAtHost - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfile. -func (in *SecurityProfile) DeepCopy() *SecurityProfile { - if in == nil { - return nil - } - out := new(SecurityProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SpotVMOptions) DeepCopyInto(out *SpotVMOptions) { - *out = *in - if in.MaxPrice != nil { - in, out := &in.MaxPrice, &out.MaxPrice - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotVMOptions. 
-func (in *SpotVMOptions) DeepCopy() *SpotVMOptions { - if in == nil { - return nil - } - out := new(SpotVMOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) { - *out = *in - in.SecurityGroup.DeepCopyInto(&out.SecurityGroup) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec. -func (in *SubnetSpec) DeepCopy() *SubnetSpec { - if in == nil { - return nil - } - out := new(SubnetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in Subnets) DeepCopyInto(out *Subnets) { - { - in := &in - *out = make(Subnets, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(SubnetSpec) - (*in).DeepCopyInto(*out) - } - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnets. -func (in Subnets) DeepCopy() Subnets { - if in == nil { - return nil - } - out := new(Subnets) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VM) DeepCopyInto(out *VM) { - *out = *in - out.Image = in.Image - in.OSDisk.DeepCopyInto(&out.OSDisk) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VM. -func (in *VM) DeepCopy() *VM { - if in == nil { - return nil - } - out := new(VM) - in.DeepCopyInto(out) - return out -}
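
Note (illustration, not part of this change): the deleted zz_generated.deepcopy.go above is controller-gen output implementing the usual Kubernetes deep-copy pattern, in which slice and pointer fields are re-allocated so the copy shares no memory with the original. The following standalone sketch reproduces that pattern by hand for a simplified IngressRules type; the trimmed-down struct fields and the main function are assumptions made for the example only and do not appear in the vendored code.

// Illustrative sketch of the deep-copy pattern used by the deleted
// generated code, shown for a slice-of-pointers type.
package main

import "fmt"

type IngressRule struct {
	Description string
	SourcePorts *string
}

type IngressRules []*IngressRule

// DeepCopyInto writes a copy of the receiver into out, allocating new
// elements so the copy shares no pointers with the original.
func (in IngressRules) DeepCopyInto(out *IngressRules) {
	*out = make(IngressRules, len(in))
	for i := range in {
		if in[i] != nil {
			rule := *in[i] // copy the struct value
			if in[i].SourcePorts != nil {
				p := *in[i].SourcePorts
				rule.SourcePorts = &p // copy the pointed-to string as well
			}
			(*out)[i] = &rule
		}
	}
}

// DeepCopy returns a new, fully independent copy of the slice.
func (in IngressRules) DeepCopy() IngressRules {
	if in == nil {
		return nil
	}
	out := new(IngressRules)
	in.DeepCopyInto(out)
	return *out
}

func main() {
	port := "443"
	rules := IngressRules{{Description: "https", SourcePorts: &port}}
	copied := rules.DeepCopy()
	*copied[0].SourcePorts = "8443" // mutating the copy...
	fmt.Println(*rules[0].SourcePorts, *copied[0].SourcePorts) // ...leaves the original at 443
}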