diff --git a/config/v1/0000_10_config-operator_01_build.crd.yaml b/config/v1/0000_10_config-operator_01_build.crd.yaml index 961f4478b7f..8f758397123 100644 --- a/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -162,7 +162,7 @@ spec: description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP.' + status.podIP, status.podIPs.' type: object required: - fieldPath diff --git a/glide.lock b/glide.lock index b3d702c5abb..5e084765cc7 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 0cc412c8ccbe6bfefac4bc281cef4d2836b1845b7e1fd9b17497229851380eaa -updated: 2019-09-23T13:26:23.372446954-04:00 +hash: f52010a7aee8c603171499d6ff9ef07fc753f5beead2d8ed453a55ffc95e96d9 +updated: 2019-12-06T11:33:00.045911162-05:00 imports: - name: github.com/davecgh/go-spew version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 @@ -11,7 +11,7 @@ imports: - proto - sortkeys - name: github.com/golang/protobuf - version: b5d812f8a3706043e23a9cd5babf2e5423744d30 + version: 6c65a5562fc06764971b7c5d05c76c75e84bdbf7 subpackages: - proto - name: github.com/google/go-cmp @@ -25,22 +25,32 @@ imports: - name: github.com/google/gofuzz version: f140a6486e521aad38f5917de355cbf147cc0496 - name: github.com/json-iterator/go - version: 27518f6661eba504be5a7a9a9f6d9460d892ade3 + version: 03217c3e97663914aec3faafde50d081f197a0a2 - name: github.com/modern-go/concurrent version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 +- name: github.com/openshift/library-go + version: 73e1fb871a9bf69c622e48daddc8b90f20466b39 + subpackages: + - alpha-build-machinery/make + - alpha-build-machinery/make/lib + - alpha-build-machinery/make/targets + - alpha-build-machinery/make/targets/golang + - alpha-build-machinery/make/targets/openshift + - alpha-build-machinery/make/targets/openshift/operator + - alpha-build-machinery/scripts - name: github.com/spf13/pflag - version: 298182f68c66c05229eb03ac171abe6e309ee79a + version: 2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab - name: golang.org/x/net - version: cdfb69ac37fc6fa907650654115ebebb3aae2087 + version: 13f9640d40b9cc418fb53703dfbd177679788ceb subpackages: - http/httpguts - http2 - http2/hpack - idna - name: golang.org/x/text - version: e6919f6577db79269a6443b9dc46d18f2238fb5d + version: 342b2e1fbaa52c93f31447ad2c6abc048c63e475 subpackages: - secure/bidirule - transform @@ -50,11 +60,11 @@ imports: version: 3d26580ed485bd4449873f6ca8ef1234e6f9ad45 repo: https://github.com/gonum/gonum.git - name: gopkg.in/inf.v0 - version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 + version: d2d2541c53f18d2a059457998ce2876cc8e67cbf - name: gopkg.in/yaml.v2 - version: 51d6538a90f86fe93ac480b35f37b2be17fef232 + version: f221b8435cfb71e54062f6c6e99e9ade30b124d5 - name: k8s.io/api - version: 95b840bb6a1f5f0462af804c8589396d294d4914 + version: 384b28a90b2b98f2743b87cb5d2bf5cbf99a4dcf subpackages: - admission/v1beta1 - admissionregistration/v1beta1 @@ -87,7 +97,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apimachinery - version: 27d36303b6556f377b4f34e64705fa9024a12b0c + version: 79c2a76c473a20cdc4ce59cae4b72529b5d9d16b subpackages: - pkg/api/apitesting - pkg/api/apitesting/fuzzer @@ -127,11 +137,11 @@ imports: - pkg/watch - third_party/forked/golang/reflect - name: k8s.io/code-generator - version: 
cd179ad6a2693011d6f2fa5cd64c6680ee99379f + version: e95606b614f049ef6087115cb340d8d5805b8da7 - name: k8s.io/gengo version: 26a664648505d962332bda642b27306bc10d1082 - name: k8s.io/klog - version: 3ca30a56d8a775276f9cdae009ba326fdc05af7f + version: 2ca9ad30301bf30a8a6e0fa2110db6b8df699a91 - name: sigs.k8s.io/yaml version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 testImports: [] diff --git a/glide.yaml b/glide.yaml index 4b1f440a904..2306e742b48 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,11 +1,11 @@ package: github.com/openshift/api import: - package: k8s.io/apimachinery - version: kubernetes-1.16.0 + version: kubernetes-1.17.0-rc.2 - package: k8s.io/api - version: kubernetes-1.16.0 + version: kubernetes-1.17.0-rc.2 - package: k8s.io/code-generator - version: kubernetes-1.16.0 + version: kubernetes-1.17.0-rc.2 # this matches the code-generator version - package: k8s.io/gengo version: 26a664648505d962332bda642b27306bc10d1082 diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index ada2b78e89d..e9cc2025852 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -57,6 +57,7 @@ import ( ) const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 // Marshaler is a configurable object for converting between // protocol buffer objects and a JSON representation for them. @@ -182,7 +183,12 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) } js["@type"] = (*json.RawMessage)(&turl) - if b, err = json.Marshal(js); err != nil { + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { return err } } @@ -206,19 +212,26 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU // Any is a bit more involved. return m.marshalAny(out, v, indent) case "Duration": - // "Generated output always contains 0, 3, 6, or 9 fractional digits, - // depending on required precision." s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } if ns <= -secondInNanos || ns >= secondInNanos { return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) } if (s > 0 && ns < 0) || (s < 0 && ns > 0) { return errors.New("signs of seconds and nanos do not match") } - if s < 0 { + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". 
+ f := "%d.%09d" + if ns < 0 { ns = -ns + if s == 0 { + f = "-%d.%09d" + } } - x := fmt.Sprintf("%d.%09d", s, ns) + x := fmt.Sprintf(f, s, ns) x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, ".000") diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go index 45a13d45a55..fd06fc2fee4 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb_test.go @@ -473,10 +473,17 @@ var marshalingTests = []struct { {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, - {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3s"}`}, - {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3, Nanos: 1e6}}, `{"dur":"3.001s"}`}, - {"Duration beyond float64 precision", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`}, - {"negative Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: -123, Nanos: -456}}, `{"dur":"-123.000000456s"}`}, + {"Duration empty", marshaler, &durpb.Duration{}, `"0s"`}, + {"Duration with secs", marshaler, &durpb.Duration{Seconds: 3}, `"3s"`}, + {"Duration with -secs", marshaler, &durpb.Duration{Seconds: -3}, `"-3s"`}, + {"Duration with nanos", marshaler, &durpb.Duration{Nanos: 1e6}, `"0.001s"`}, + {"Duration with -nanos", marshaler, &durpb.Duration{Nanos: -1e6}, `"-0.001s"`}, + {"Duration with large secs", marshaler, &durpb.Duration{Seconds: 1e10, Nanos: 1}, `"10000000000.000000001s"`}, + {"Duration with 6-digit nanos", marshaler, &durpb.Duration{Nanos: 1e4}, `"0.000010s"`}, + {"Duration with 3-digit nanos", marshaler, &durpb.Duration{Nanos: 1e6}, `"0.001s"`}, + {"Duration with -secs -nanos", marshaler, &durpb.Duration{Seconds: -123, Nanos: -450}, `"-123.000000450s"`}, + {"Duration max value", marshaler, &durpb.Duration{Seconds: 315576000000, Nanos: 999999999}, `"315576000000.999999999s"`}, + {"Duration min value", marshaler, &durpb.Duration{Seconds: -315576000000, Nanos: -999999999}, `"-315576000000.999999999s"`}, {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ Fields: map[string]*stpb.Value{ "one": {Kind: &stpb.Value_StringValue{"loneliest number"}}, @@ -549,15 +556,17 @@ func TestMarshalIllegalTime(t *testing.T) { pb proto.Message fail bool }{ - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: 0}}, false}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: 0}}, false}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: -1}}, true}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: 1}}, true}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: 1, Nanos: 1000000000}}, true}, - {&pb.KnownTypes{Dur: &durpb.Duration{Seconds: -1, Nanos: -1000000000}}, true}, - {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: 1}}, false}, - {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: -1}}, true}, - {&pb.KnownTypes{Ts: &tspb.Timestamp{Seconds: 1, Nanos: 1000000000}}, true}, + {&durpb.Duration{Seconds: 1, Nanos: 0}, false}, + {&durpb.Duration{Seconds: -1, Nanos: 0}, false}, + {&durpb.Duration{Seconds: 1, Nanos: -1}, true}, + {&durpb.Duration{Seconds: -1, Nanos: 1}, true}, + {&durpb.Duration{Seconds: 315576000001}, true}, + {&durpb.Duration{Seconds: -315576000001}, true}, + 
{&durpb.Duration{Seconds: 1, Nanos: 1000000000}, true}, + {&durpb.Duration{Seconds: -1, Nanos: -1000000000}, true}, + {&tspb.Timestamp{Seconds: 1, Nanos: 1}, false}, + {&tspb.Timestamp{Seconds: 1, Nanos: -1}, true}, + {&tspb.Timestamp{Seconds: 1, Nanos: 1000000000}, true}, } for _, tt := range tests { _, err := marshaler.MarshalToString(tt.pb) @@ -598,6 +607,28 @@ func TestMarshalAnyJSONPBMarshaler(t *testing.T) { if str != expected { t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected) } + + // Do it again, but this time with indentation: + + marshaler := Marshaler{Indent: " "} + str, err = marshaler.MarshalToString(a) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err) + } + // same as expected above, but pretty-printed w/ indentation + expected = `{ + "@type": "type.googleapis.com/` + dynamicMessageName + `", + "baz": [ + 0, + 1, + 2, + 3 + ], + "foo": "bar" +}` + if str != expected { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected) + } } func TestMarshalWithCustomValidation(t *testing.T) { diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index 79668ff5c5c..a4b8c0cd3a8 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -38,7 +38,6 @@ package proto import ( "fmt" "log" - "os" "reflect" "sort" "strconv" @@ -194,7 +193,7 @@ func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + log.Printf("proto: tag has too few fields: %q", s) return } @@ -214,7 +213,7 @@ func (p *Properties) Parse(s string) { p.WireType = WireBytes // no numeric converter for non-numeric types default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + log.Printf("proto: tag has unknown wire type: %q", s) return } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go index 1ddfe836f4d..5d1e3f0f619 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/grpc/grpc.go @@ -54,6 +54,8 @@ const generatedCodeVersion = 4 const ( contextPkgPath = "context" grpcPkgPath = "google.golang.org/grpc" + codePkgPath = "google.golang.org/grpc/codes" + statusPkgPath = "google.golang.org/grpc/status" ) func init() { @@ -216,6 +218,12 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P("}") g.P() + // Server Unimplemented struct for forward compatibility. + if deprecated { + g.P(deprecationComment) + } + g.generateUnimplementedServer(servName, service) + // Server registration.
if deprecated { g.P(deprecationComment) @@ -269,6 +277,35 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P() } +// generateUnimplementedServer creates the unimplemented server struct +func (g *grpc) generateUnimplementedServer(servName string, service *pb.ServiceDescriptorProto) { + serverType := servName + "Server" + g.P("// Unimplemented", serverType, " can be embedded to have forward compatible implementations.") + g.P("type Unimplemented", serverType, " struct {") + g.P("}") + g.P() + // UnimplementedServer's concrete methods + for _, method := range service.Method { + g.generateServerMethodConcrete(servName, method) + } + g.P() +} + +// generateServerMethodConcrete returns unimplemented methods which ensure forward compatibility +func (g *grpc) generateServerMethodConcrete(servName string, method *pb.MethodDescriptorProto) { + header := g.generateServerSignatureWithParamNames(servName, method) + g.P("func (*Unimplemented", servName, "Server) ", header, " {") + var nilArg string + if !method.GetServerStreaming() && !method.GetClientStreaming() { + nilArg = "nil, " + } + methName := generator.CamelCase(method.GetName()) + statusPkg := string(g.gen.AddImport(statusPkgPath)) + codePkg := string(g.gen.AddImport(codePkgPath)) + g.P("return ", nilArg, statusPkg, `.Errorf(`, codePkg, `.Unimplemented, "method `, methName, ` not implemented")`) + g.P("}") +} + // generateClientSignature returns the client-side signature for a method. func (g *grpc) generateClientSignature(servName string, method *pb.MethodDescriptorProto) string { origMethName := method.GetName() @@ -368,6 +405,30 @@ func (g *grpc) generateClientMethod(servName, fullServName, serviceDescVar strin } } +// generateServerSignatureWithParamNames returns the server-side signature for a method with parameter names. +func (g *grpc) generateServerSignatureWithParamNames(servName string, method *pb.MethodDescriptorProto) string { + origMethName := method.GetName() + methName := generator.CamelCase(origMethName) + if reservedClientName[methName] { + methName += "_" + } + + var reqArgs []string + ret := "error" + if !method.GetServerStreaming() && !method.GetClientStreaming() { + reqArgs = append(reqArgs, "ctx "+contextPkg+".Context") + ret = "(*" + g.typeName(method.GetOutputType()) + ", error)" + } + if !method.GetClientStreaming() { + reqArgs = append(reqArgs, "req *"+g.typeName(method.GetInputType())) + } + if method.GetServerStreaming() || method.GetClientStreaming() { + reqArgs = append(reqArgs, "srv "+servName+"_"+generator.CamelCase(origMethName)+"Server") + } + + return methName + "(" + strings.Join(reqArgs, ", ") + ") " + ret +} + // generateServerSignature returns the server-side signature for a method. 
func (g *grpc) generateServerSignature(servName string, method *pb.MethodDescriptorProto) string { origMethName := method.GetName() diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go index 5af4d22e2e9..2515c99eb9b 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/deprecated/deprecated.pb.go @@ -10,6 +10,8 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -235,6 +237,15 @@ type DeprecatedServiceServer interface { DeprecatedCall(context.Context, *DeprecatedRequest) (*DeprecatedResponse, error) } +// Deprecated: Do not use. +// UnimplementedDeprecatedServiceServer can be embedded to have forward compatible implementations. +type UnimplementedDeprecatedServiceServer struct { +} + +func (*UnimplementedDeprecatedServiceServer) DeprecatedCall(ctx context.Context, req *DeprecatedRequest) (*DeprecatedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeprecatedCall not implemented") +} + // Deprecated: Do not use. func RegisterDeprecatedServiceServer(s *grpc.Server, srv DeprecatedServiceServer) { s.RegisterService(&_DeprecatedService_serviceDesc, srv) diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go index 98e4f40cd05..76c9a20a117 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc.pb.go @@ -8,6 +8,8 @@ import ( fmt "fmt" proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" math "math" ) @@ -321,6 +323,23 @@ type TestServer interface { Bidi(Test_BidiServer) error } +// UnimplementedTestServer can be embedded to have forward compatible implementations. +type UnimplementedTestServer struct { +} + +func (*UnimplementedTestServer) UnaryCall(ctx context.Context, req *SimpleRequest) (*SimpleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnaryCall not implemented") +} +func (*UnimplementedTestServer) Downstream(req *SimpleRequest, srv Test_DownstreamServer) error { + return status.Errorf(codes.Unimplemented, "method Downstream not implemented") +} +func (*UnimplementedTestServer) Upstream(srv Test_UpstreamServer) error { + return status.Errorf(codes.Unimplemented, "method Upstream not implemented") +} +func (*UnimplementedTestServer) Bidi(srv Test_BidiServer) error { + return status.Errorf(codes.Unimplemented, "method Bidi not implemented") +} + func RegisterTestServer(s *grpc.Server, srv TestServer) { s.RegisterService(&_Test_serviceDesc, srv) } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc_empty.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc_empty.pb.go new file mode 100644 index 00000000000..9c6244bc7af --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc_empty.pb.go @@ -0,0 +1,79 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: grpc/grpc_empty.proto + +package testing + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("grpc/grpc_empty.proto", fileDescriptor_c580a37f1c90e9b1) } + +var fileDescriptor_c580a37f1c90e9b1 = []byte{ + // 125 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0xd6, 0x07, 0x11, 0xf1, 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, + 0x3c, 0x20, 0x11, 0xbd, 0x92, 0xd4, 0xe2, 0x92, 0xcc, 0xbc, 0x74, 0x23, 0x3e, 0x2e, 0x1e, 0x57, + 0x90, 0x64, 0x70, 0x6a, 0x51, 0x59, 0x66, 0x72, 0xaa, 0x93, 0x43, 0x94, 0x5d, 0x7a, 0x66, 0x49, + 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, 0xba, 0x3e, 0x58, + 0x63, 0x52, 0x69, 0x1a, 0x84, 0x91, 0xac, 0x9b, 0x9e, 0x9a, 0xa7, 0x9b, 0x9e, 0xaf, 0x0f, 0x32, + 0x23, 0x25, 0xb1, 0x24, 0x11, 0x6c, 0x87, 0x35, 0xd4, 0xc4, 0x24, 0x36, 0xb0, 0x22, 0x63, 0x40, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x93, 0x1d, 0xf2, 0x47, 0x7f, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// EmptyServiceClient is the client API for EmptyService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type EmptyServiceClient interface { +} + +type emptyServiceClient struct { + cc *grpc.ClientConn +} + +func NewEmptyServiceClient(cc *grpc.ClientConn) EmptyServiceClient { + return &emptyServiceClient{cc} +} + +// EmptyServiceServer is the server API for EmptyService service. +type EmptyServiceServer interface { +} + +// UnimplementedEmptyServiceServer can be embedded to have forward compatible implementations. +type UnimplementedEmptyServiceServer struct { +} + +func RegisterEmptyServiceServer(s *grpc.Server, srv EmptyServiceServer) { + s.RegisterService(&_EmptyService_serviceDesc, srv) +} + +var _EmptyService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.testing.EmptyService", + HandlerType: (*EmptyServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc/grpc_empty.proto", +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc_empty.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc_empty.proto new file mode 100644 index 00000000000..ae07b81caa7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/testdata/grpc/grpc_empty.proto @@ -0,0 +1,38 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2019 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package grpc.testing; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/testdata/grpc;testing"; + +service EmptyService {} diff --git a/vendor/github.com/json-iterator/go/api_tests/marshal_json_escape_test.go b/vendor/github.com/json-iterator/go/api_tests/marshal_json_escape_test.go new file mode 100644 index 00000000000..b50e72fff1d --- /dev/null +++ b/vendor/github.com/json-iterator/go/api_tests/marshal_json_escape_test.go @@ -0,0 +1,47 @@ +package test + +import ( + "bytes" + "encoding/json" + "testing" + + jsoniter "github.com/json-iterator/go" + "github.com/stretchr/testify/require" +) + +var marshalConfig = jsoniter.Config{ + EscapeHTML: false, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +type Container struct { + Bar interface{} +} + +func (c *Container) MarshalJSON() ([]byte, error) { + return marshalConfig.Marshal(&c.Bar) +} + +func TestEncodeEscape(t *testing.T) { + should := require.New(t) + + container := &Container{ + Bar: []string{"123", "ooo"}, + } + out, err := marshalConfig.Marshal(container) + should.Nil(err) + bufout := string(out) + + var stdbuf bytes.Buffer + stdenc := json.NewEncoder(&stdbuf) + stdenc.SetEscapeHTML(false) + err = stdenc.Encode(container) + should.Nil(err) + stdout := string(stdbuf.Bytes()) + if stdout[len(stdout)-1:] == "\n" { + stdout = stdout[:len(stdout)-1] + } + + should.Equal(stdout, bufout) +} diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go index 95ae54fbfe4..29b31cf7895 100644 --- a/vendor/github.com/json-iterator/go/iter.go +++ b/vendor/github.com/json-iterator/go/iter.go @@ -74,6 +74,7 @@ type Iterator struct { buf []byte head int tail int + depth int captureStartedAt int captured []byte Error error @@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator { buf: nil, head: 0, tail: 0, + depth: 0, } } @@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { buf: make([]byte, bufSize), 
head: 0, tail: 0, + depth: 0, } } @@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator { buf: input, head: 0, tail: len(input), + depth: 0, } } @@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator { iter.reader = reader iter.head = 0 iter.tail = 0 + iter.depth = 0 return iter } @@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator { iter.buf = input iter.head = 0 iter.tail = len(input) + iter.depth = 0 return iter } @@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} { return nil } } + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go index 6188cb4577a..204fe0e0922 100644 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) { func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c := iter.nextToken() if c == '[' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c != ']' { iter.unreadByte() if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() for c == ',' { if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != ']' { iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } - return true + return iter.decrementDepth() } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index 1c575767130..b65137114f6 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() var field string if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() @@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { @@ -159,15 +166,20 
@@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { field = iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 8fcdc3b69bd..9303de41e40 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() { func (iter *Iterator) skipArray() { level := 1 + if !iter.incrementDepth() { + return + } for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() { i = iter.head - 1 // it will be i++ soon case '[': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case ']': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { @@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() { func (iter *Iterator) skipObject() { level := 1 + if !iter.incrementDepth() { + return + } + for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() { i = iter.head - 1 // it will be i++ soon case '{': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case '}': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { diff --git a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_map_test.go b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_map_test.go index b0dde94cfdd..b73de698df4 100644 --- a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_map_test.go +++ b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_map_test.go @@ -42,3 +42,11 @@ func Test_map_eface_of_eface(t *testing.T) { should.NoError(err) should.Equal(`{"1":2,"3":"4"}`, output) } + +func Test_encode_nil_map(t *testing.T) { + should := require.New(t) + var nilMap map[string]string + output, err := jsoniter.MarshalToString(nilMap) + should.NoError(err) + should.Equal(`null`, output) +} diff --git a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_nested_test.go 
b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_nested_test.go index 1e4994a1325..fb2e60fa637 100644 --- a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_nested_test.go +++ b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_nested_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "github.com/json-iterator/go" "reflect" + "strings" "testing" ) @@ -15,6 +16,243 @@ type Level2 struct { World string } +func Test_deep_nested(t *testing.T) { + type unstructured interface{} + + testcases := []struct { + name string + data []byte + expectError string + }{ + { + name: "array under maxDepth", + data: []byte(`{"a":` + strings.Repeat(`[`, 10000-1) + strings.Repeat(`]`, 10000-1) + `}`), + expectError: "", + }, + { + name: "array over maxDepth", + data: []byte(`{"a":` + strings.Repeat(`[`, 10000) + strings.Repeat(`]`, 10000) + `}`), + expectError: "max depth", + }, + { + name: "object under maxDepth", + data: []byte(`{"a":` + strings.Repeat(`{"a":`, 10000-1) + `0` + strings.Repeat(`}`, 10000-1) + `}`), + expectError: "", + }, + { + name: "object over maxDepth", + data: []byte(`{"a":` + strings.Repeat(`{"a":`, 10000) + `0` + strings.Repeat(`}`, 10000) + `}`), + expectError: "max depth", + }, + } + + targets := []struct { + name string + new func() interface{} + }{ + { + name: "unstructured", + new: func() interface{} { + var v interface{} + return &v + }, + }, + { + name: "typed named field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + }{} + return &v + }, + }, + { + name: "typed missing field", + new: func() interface{} { + v := struct { + B interface{} `json:"b"` + }{} + return &v + }, + }, + { + name: "typed 1 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + }{} + return &v + }, + }, + { + name: "typed 2 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + }{} + return &v + }, + }, + { + name: "typed 3 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + }{} + return &v + }, + }, + { + name: "typed 4 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + }{} + return &v + }, + }, + { + name: "typed 5 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + }{} + return &v + }, + }, + { + name: "typed 6 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + F interface{} `json:"f"` + }{} + return &v + }, + }, + { + name: "typed 7 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + F interface{} `json:"f"` + G interface{} `json:"g"` + }{} + return &v + }, + }, + { + name: "typed 8 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + F interface{} `json:"f"` + G interface{} `json:"g"` + H interface{} `json:"h"` + }{} + return &v + }, + }, + { + name: "typed 9 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} 
`json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + F interface{} `json:"f"` + G interface{} `json:"g"` + H interface{} `json:"h"` + I interface{} `json:"i"` + }{} + return &v + }, + }, + { + name: "typed 10 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + F interface{} `json:"f"` + G interface{} `json:"g"` + H interface{} `json:"h"` + I interface{} `json:"i"` + J interface{} `json:"j"` + }{} + return &v + }, + }, + { + name: "typed 11 field", + new: func() interface{} { + v := struct { + A interface{} `json:"a"` + B interface{} `json:"b"` + C interface{} `json:"c"` + D interface{} `json:"d"` + E interface{} `json:"e"` + F interface{} `json:"f"` + G interface{} `json:"g"` + H interface{} `json:"h"` + I interface{} `json:"i"` + J interface{} `json:"j"` + K interface{} `json:"k"` + }{} + return &v + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + for _, target := range targets { + t.Run(target.name, func(t *testing.T) { + err := jsoniter.Unmarshal(tc.data, target.new()) + if len(tc.expectError) == 0 { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + } else { + if err == nil { + t.Errorf("expected error, got none") + } else if !strings.Contains(err.Error(), tc.expectError) { + t.Errorf("expected error containing '%s', got: %v", tc.expectError, err) + } + } + }) + } + }) + } +} + func Test_nested(t *testing.T) { iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"hello": [{"world": "value1"}, {"world": "value2"}]}`) l1 := Level1{} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 4459e203fb8..74974ba74b0 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx { // ReadVal copy the underlying JSON into go interface, same as json.Unmarshal func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth cacheKey := reflect2.RTypeOf(obj) decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { @@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) { return } decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } } // WriteVal copy the go interface into underlying JSON, same as json.Marshal diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 05e8fbf1fed..e27e8d19179 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } - tagParts := strings.Split(tag, ",") if tag == "-" { continue } + tagParts := strings.Split(tag, ",") if field.Anonymous() && (tag == "" || tagParts[0] == "") { if field.Type().Kind() == reflect.Struct { structDescriptor := describeStruct(ctx, field.Type()) diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 547b4421e34..08e9a391251 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -249,6 +249,10 @@ type mapEncoder 
struct { } func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } stream.WriteObjectStart() iter := encoder.mapType.UnsafeIterate(ptr) for i := 0; iter.HasNext(); i++ { diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go index fea50719de9..3e21f375671 100644 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -3,8 +3,9 @@ package jsoniter import ( "encoding" "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() @@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - bytes, err := json.Marshal(obj) + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimmed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } stream.Write(bytes) } } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 932641ac46b..5ad5cc561af 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } var c byte for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) @@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if c != '}' { iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) } + iter.decrementDepth() } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { if iter.readFieldHash() == decoder.fieldHash { decoder.fieldDecoder.Decode(ptr, iter) @@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type twoFieldsStructDecoder struct { @@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type threeFieldsStructDecoder struct { @@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if
iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fourFieldsStructDecoder struct { @@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fiveFieldsStructDecoder struct { @@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sixFieldsStructDecoder struct { @@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sevenFieldsStructDecoder struct { @@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type eightFieldsStructDecoder struct { @@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type nineFieldsStructDecoder struct { @@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type tenFieldsStructDecoder struct { @@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case 
decoder.fieldHash1: @@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type structFieldDecoder struct { diff --git a/vendor/github.com/openshift/library-go/.gitignore b/vendor/github.com/openshift/library-go/.gitignore new file mode 100644 index 00000000000..533a4d2b86b --- /dev/null +++ b/vendor/github.com/openshift/library-go/.gitignore @@ -0,0 +1,20 @@ +/_output +/third-party +/.project +/.vagrant +/.vscode +/.settings +/cpu.pprof +/os-version-defs +/.make/ +*.swp +.vimrc +.DS_Store +.idea +origin.iml +*.pyc +.tag* +.project +*.go~ +.envrc +.hg_archival.txt diff --git a/vendor/github.com/openshift/library-go/LICENSE b/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/openshift/library-go/Makefile b/vendor/github.com/openshift/library-go/Makefile
new file mode 100644
index 00000000000..8f164242c6c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/Makefile
@@ -0,0 +1,23 @@
+all: build
+.PHONY: all
+
+# All the go packages (e.g. for verify)
+GO_PACKAGES :=./pkg/...
+# Packages to be compiled
+GO_BUILD_PACKAGES :=$(GO_PACKAGES)
+# Do not auto-expand packages for libraries, or they would be compiled separately
+GO_BUILD_PACKAGES_EXPANDED :=$(GO_BUILD_PACKAGES)
+
+include $(addprefix alpha-build-machinery/make/, \
+	golang.mk \
+	targets/openshift/deps.mk \
+	targets/openshift/bindata.mk \
+)
+
+$(call add-bindata,backingresources,./pkg/operator/staticpod/controller/backingresource/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/backingresource/bindata/bindata.go)
+$(call add-bindata,monitoring,./pkg/operator/staticpod/controller/monitoring/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/monitoring/bindata/bindata.go)
+$(call add-bindata,installer,./pkg/operator/staticpod/controller/installer/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/installer/bindata/bindata.go)
+$(call add-bindata,staticpod,./pkg/operator/staticpod/controller/prune/manifests/...,bindata,bindata,./pkg/operator/staticpod/controller/prune/bindata/bindata.go)
+
+test-e2e-encryption: GO_TEST_PACKAGES :=./test/e2e-encryption/...
+.PHONY: test-e2e-encryption
\ No newline at end of file
diff --git a/vendor/github.com/openshift/library-go/OWNERS b/vendor/github.com/openshift/library-go/OWNERS
new file mode 100644
index 00000000000..b372622b7b3
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/OWNERS
@@ -0,0 +1,8 @@
+reviewers:
+ - smarterclayton
+ - deads2k
+ - sttts
+approvers:
+ - smarterclayton
+ - deads2k
+ - sttts
diff --git a/vendor/github.com/openshift/library-go/README.md b/vendor/github.com/openshift/library-go/README.md
new file mode 100644
index 00000000000..db058062608
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/README.md
@@ -0,0 +1,4 @@
+# library-go
+Helpers for going from apis and clients to useful runtime constructs. `config.ServingInfo` to useful serving constructs is the canonical example.
Anything introduced here must have concrete use-cases in at least two separate openshift repos and be of some reasonable complexity. The bar here is high. We'll start with openshift/api-review as the approvers. + +This repo **must not depend on k8s.io/kubernetes or openshift/origin**. diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile new file mode 100644 index 00000000000..b44d020e4b6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile @@ -0,0 +1,61 @@ +SHELL :=/bin/bash +all: verify +.PHONY: all + +makefiles :=$(wildcard ./make/*.example.mk) +examples :=$(wildcard ./make/examples/*/Makefile.test) + +# $1 - makefile name relative to ./make/ folder +# $2 - target +# $3 - output folder +# We need to change dir to the final makefile directory or relative paths won't match. +# Dynamic values are replaced with "" so we can do diff against checkout versions. +# Avoid comparing local paths by stripping the prefix. +# Delete lines referencing temporary files and directories +# Unify make error output between versions +# Ignore old cp errors on centos7 +# Ignore different make output with `-k` option +define update-makefile-log +mkdir -p "$(3)" +set -o pipefail; $(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | \ + sed 's/\.\(buildDate\|versionFromGit\|commitFromGit\|gitTreeState\)="[^"]*" /.\1="" /g' | \ + sed -E 's~/.*/(github.com/openshift/library-go/alpha-build-machinery/.*)~/\1~g' | \ + sed '/\/tmp\/tmp./d' | \ + sed '/git checkout -b/d' | \ + sed -E 's~^[<> ]*((\+\+\+|\-\-\-) \./(testing/)?manifests/.*.yaml).*~\1~' | \ + sed -E 's/^(make\[2\]: \*\*\* \[).*: (.*\] Error 1)/\1\2/' | \ + grep -v 'are the same file' | \ + grep -E -v -e '^make\[2\]: Target `.*'"'"' not remade because of errors\.$$' | \ + tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) + +endef + + +# $1 - makefile name relative to ./make/ folder +# $2 - target +# $3 - output folder +define check-makefile-log +$(call update-makefile-log,$(1),$(2),$(3)) +diff -N "$(1)$(subst ..,.,.$(2).log)" "$(3)/$(notdir $(1))$(subst ..,.,.$(2).log)" + +endef + +update-makefiles: + $(foreach f,$(makefiles),$(call check-makefile-log,$(f),help,$(dir $(f)))) + $(foreach f,$(examples),$(call check-makefile-log,$(f),,$(dir $(f)))) +.PHONY: update-makefiles + +verify-makefiles: tmp_dir:=$(shell mktemp -d) +verify-makefiles: + $(foreach f,$(makefiles),$(call check-makefile-log,$(f),help,$(tmp_dir)/$(dir $(f)))) + $(foreach f,$(examples),$(call check-makefile-log,$(f),,$(tmp_dir)/$(dir $(f)))) +.PHONY: verify-makefiles + +verify: verify-makefiles +.PHONY: verify + +update: update-makefiles +.PHONY: update + + +include ./make/targets/help.mk diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS b/vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS new file mode 100644 index 00000000000..ff2b6a24c8c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - tnozicka +approvers: + - tnozicka diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/README.md b/vendor/github.com/openshift/library-go/alpha-build-machinery/README.md new file mode 100644 index 00000000000..294a5834a82 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/README.md @@ -0,0 +1,37 @@ +# library-go/alpha-build-machinery +These are the 
building blocks shared by this and many of our other repositories: Makefiles, helper scripts and other build-related machinery.
+
+## Makefiles
+The `make/` directory contains several predefined makefiles (`*.mk`); choose one of them and include it as the base of your final `Makefile`. These are the predefined flows providing you with e.g. `build`, `test` or `verify` targets. To start, it is recommended that you base your Makefile on the corresponding `*.example.mk` using copy&paste.
+
+As some advanced targets are generated, every Makefile provides a `make help` target listing all the available ones. All of the "example" makefiles have a corresponding `.help` file listing all the targets available there.
+
+For advanced use, if none of the predefined flows fits your needs, you can compose your own flow from the modules, in a similar way to how the predefined flows do.
+
+### Golang
+Standard makefile for building pure Golang projects.
+ - [make/golang.mk](make/golang.mk)
+ - [make/golang.example.mk](make/golang.example.mk)
+ - [make/golang.example.mk.help](make/golang.example.mk.help)
+
+### Default
+Standard makefile for OpenShift Golang projects.
+
+Extends [#Golang]().
+
+ - [make/default.mk](make/default.mk)
+ - [make/default.example.mk](make/default.example.mk)
+ - [make/default.example.mk.help](make/default.example.mk.help)
+
+### Operator
+Standard makefile for OpenShift operator projects.
+
+Extends [#Default]().
+
+ - [make/operator.mk](make/operator.mk)
+ - [make/operator.example.mk](make/operator.example.mk)
+ - [make/operator.example.mk.help](make/operator.example.mk.help)
+
+
+## Scripts
+`scripts/` contains more complicated logic that is used in some make targets.
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/doc.go
new file mode 100644
index 00000000000..a093b4bd173
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/doc.go
@@ -0,0 +1,14 @@
+// required for gomod to pull in packages.
+
+package alpha_build_machinery
+
+// this is a dependency magnet to make it easier to pull in the build-machinery. We want a single import to pull all of it in.
+import (
+	_ "github.com/openshift/library-go/alpha-build-machinery/make"
+	_ "github.com/openshift/library-go/alpha-build-machinery/make/lib"
+	_ "github.com/openshift/library-go/alpha-build-machinery/make/targets"
+	_ "github.com/openshift/library-go/alpha-build-machinery/make/targets/golang"
+	_ "github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift"
+	_ "github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator"
+	_ "github.com/openshift/library-go/alpha-build-machinery/scripts"
+)
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk
new file mode 100644
index 00000000000..fffc5b3a3c9
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk
@@ -0,0 +1,40 @@
+all: build
+.PHONY: all
+
+# You can customize go tools depending on the directory layout.
+# example:
+GO_BUILD_PACKAGES :=./pkg/...
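+# For example (an illustrative override, not part of this file): a repo that
+# keeps its binaries under ./cmd/ could instead rely on the library default
+# from lib/golang.mk:
+# GO_BUILD_PACKAGES :=./cmd/...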
+# You can list all the golang related variables by:
+# $ make -n --print-data-base | grep ^GO
+
+# Include the library makefile
+include ./default.mk
+# All the available targets are listed in .help
+# or you can list them live by using `make help`
+
+# The codegen module needs these required variables to be set
+CODEGEN_OUTPUT_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/generated
+CODEGEN_API_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/apis
+CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1
+# You can list all codegen related variables by:
+# $ make -n --print-data-base | grep ^CODEGEN
+
+# This will call a macro called "build-image" which will generate image specific targets based on the parameters:
+# $1 - target name
+# $2 - image ref
+# $3 - Dockerfile path
+# $4 - context
+# It will generate target "image-$(1)" for building the image and binding it as a prerequisite to target "images".
+$(call build-image,ocp-cli,registry.svc.ci.openshift.org/ocp/4.2:cli,./images/cli/Dockerfile.rhel,.)
+
+# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters:
+# $0 - macro name
+# $1 - target suffix
+# $2 - input dirs
+# $3 - prefix
+# $4 - pkg
+# $5 - output
+# It will generate targets {update,verify}-bindata-$(1), logically grouping them in unsuffixed versions of these targets
+# and also hooking them into {update,verify}-generated for broader integration.
+$(call add-bindata,v3.11.0,./bindata/v3.11.0/...,bindata,v311_00_assets,pkg/operator/v311_00_assets/bindata.go)
+
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log
new file mode 100644
index 00000000000..92aa6acdbb8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.example.mk.help.log
@@ -0,0 +1,25 @@
+The following make targets are available:
+all
+build
+clean
+clean-binaries
+help
+image-ocp-cli
+images
+test
+test-unit
+update
+update-bindata
+update-codegen
+update-deps
+update-deps-overrides
+update-generated
+update-gofmt
+verify
+verify-bindata
+verify-codegen
+verify-deps
+verify-generated
+verify-gofmt
+verify-golint
+verify-govet
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk
new file mode 100644
index 00000000000..30806edb120
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk
@@ -0,0 +1,23 @@
+self_dir := $(dir $(lastword $(MAKEFILE_LIST)))
+
+# We extend the default verify/update for Golang
+
+verify: verify-codegen
+verify: verify-bindata
+.PHONY: verify
+
+update: update-codegen
+update: update-bindata
+.PHONY: update
+
+
+# We need to be careful to expand all the paths before any include is done
+# or self_dir could be modified for the next include by the included file.
+# Also doing this at the end of the file allows us to use self_dir before it could be modified.
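+# e.g. when this file lives at ./make/default.mk, self_dir expands to "./make/"
+# and the include below pulls in "./make/targets/openshift/deps.mk" and so on
+# (an illustrative expansion of $(addprefix ...), assuming that checkout layout).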
+include $(addprefix $(self_dir), \ + targets/openshift/deps.mk \ + targets/openshift/images.mk \ + targets/openshift/bindata.mk \ + targets/openshift/codegen.mk \ + golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/doc.go new file mode 100644 index 00000000000..66ba5512e1f --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore new file mode 100644 index 00000000000..d06fd13720c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore @@ -0,0 +1,3 @@ +/oc +/openshift +/_output/ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile new file mode 100644 index 00000000000..47380db0a5c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile @@ -0,0 +1,40 @@ +include $(addprefix ../../, \ + golang.mk \ + targets/openshift/rpm.mk \ + targets/openshift/crd-schema-gen.mk \ + targets/openshift/deps.mk \ +) + +# Set crd-schema-gen variables +CRD_APIS :=$(addprefix ./pkg/apis/,v1 v1beta1) + +# rpm wants build-id set +GO_LD_EXTRAFLAGS +=-B 0x$$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n') + +OUTPUT_DIR :=_output +CROSS_BUILD_BINDIR :=$(OUTPUT_DIR)/bin +RPM_EXTRAFLAGS :=--quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' + +# $1 - target name +# $2 - apis +# $3 - manifests +# $4 - output +$(call add-crd-gen,manifests,$(CRD_APIS),./manifests,./manifests) + +cross-build-darwin-amd64: + +@GOOS=darwin GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/darwin_amd64 +.PHONY: cross-build-darwin-amd64 + +cross-build-windows-amd64: + +@GOOS=windows GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/windows_amd64 +.PHONY: cross-build-windows-amd64 + +cross-build: cross-build-darwin-amd64 cross-build-windows-amd64 +.PHONY: cross-build + +clean-cross-build: + $(RM) -r '$(CROSS_BUILD_BINDIR)' + if [ -d '$(OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(OUTPUT_DIR)'; fi +.PHONY: clean-cross-build + +clean: clean-cross-build diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log new file mode 100644 index 00000000000..ef2631a76bd --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log @@ -0,0 +1,187 @@ +make build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X 
github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +[[ -f ./openshift ]] +[[ -f ./oc ]] +# test version is set correctly when linking +# majorFromGit, minorFromGit are deprecated upstream and set to empty value +# we avoid comparing time to avoid flakes +diff <( ./oc | sed '$d' ) <( \ + echo '' && \ + echo '' && \ + echo 'aaa' && \ + echo 'v42.43.44' && \ + echo 'clean' \ +) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -f ./openshift ]] +[[ ! -f ./oc ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] || (ls -l ./_output/ && false) +make cross-build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean +mkdir -p '_output/bin/darwin_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/oc' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +mkdir -p '_output/bin/darwin_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/openshift' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +mkdir -p '_output/bin/windows_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/oc.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +mkdir -p '_output/bin/windows_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/openshift.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +[[ ! -f ./openshift ]] +[[ ! 
-f ./oc ]] +[[ -f ./_output/bin/darwin_amd64/openshift ]] +[[ -f ./_output/bin/darwin_amd64/oc ]] +[[ -f ./_output/bin/windows_amd64/openshift.exe ]] +[[ -f ./_output/bin/windows_amd64/oc.exe ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) +make rpm-build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean +rpmbuild -ba --define "_topdir /github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --define "go_package github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' ocp.spec +[[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] +[[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +cp -r ./testing/manifests/initial/* ./manifests/ +diff -Naup ./testing/manifests/initial/ ./manifests/ +! 
make verify-codegen-crds +Installing controller-gen into '_output/tools/bin/controller-gen' +mkdir -p '_output/tools/bin/' +curl -s -f -L https://github.com/openshift/kubernetes-sigs-controller-tools/releases/download/v0.2.1-37-ga3cca5d/controller-gen-linux-amd64 -o '_output/tools/bin/controller-gen' +chmod +x '_output/tools/bin/controller-gen'; +Installing yq into '_output/tools/bin/yq' +mkdir -p '_output/tools/bin/' +curl -s -f -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64 -o '_output/tools/bin/yq' +chmod +x '_output/tools/bin/yq'; +--- ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml +@@ -9,6 +9,40 @@ spec: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" ++ validation: ++ openAPIV3Schema: ++ description: MyOtherOperatorResource is an example operator configuration type ++ properties: ++ apiVersion: ++ description: 'APIVersion defines the versioned schema of this representation ++ of an object. Servers should convert recognized schemas to the latest ++ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' ++ type: string ++ kind: ++ description: 'Kind is a string value representing the REST resource this ++ object represents. Servers may infer this from the endpoint the client ++ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' ++ type: string ++ metadata: ++ type: object ++ spec: ++ properties: ++ deprecatedField: ++ type: string ++ name: ++ type: string ++ overwritePattern: ++ pattern: ^(Managed|Unmanaged)$ ++ type: string ++ required: ++ - deprecatedField ++ - name ++ - overwritePattern ++ type: object ++ required: ++ - metadata ++ - spec ++ type: object + version: v1beta1 + versions: + - name: v1beta1 +make[2]: *** [verify-codegen-crds-manifests] Error 1 +make update-codegen-crds +Using existing controller-gen from "_output/tools/bin/controller-gen" +Using existing yq from "_output/tools/bin/yq" +'_output/tools/bin/controller-gen' schemapatch:manifests="./manifests" paths="./pkg/apis/v1;./pkg/apis/v1beta1" output:dir="./manifests" +_output/tools/bin/yq m -i -x './manifests/operator.openshift.io_myotheroperatorresources.crd.yaml' './manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch' +_output/tools/bin/yq m -i -x './manifests/operator.openshift.io_myoperatorresources.crd.yaml' './manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch' +make verify-codegen-crds +Using existing controller-gen from "_output/tools/bin/controller-gen" +Using existing yq from "_output/tools/bin/yq" +cp -r ./testing/manifests/initial/*.crd.yaml-merge-patch ./manifests/ +! diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null 1>&2 +diff -Naup ./testing/manifests/updated/ ./manifests/ +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] || (ls -l ./_output/ && false) +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f '_output/tools/bin/controller-gen' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f '_output/tools/bin/yq' +if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_output/tools/bin/'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go new file mode 100644 index 00000000000..cf699883ef2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "fmt" + + "github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version" +) + +func main() { + fmt.Print(version.String()) +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go new file mode 100644 index 00000000000..79058077776 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift/main.go @@ -0,0 +1,5 @@ +package main + +func main() { + +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.lock b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.lock new file mode 100644 index 00000000000..339b04e1a04 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.lock @@ -0,0 +1,61 @@ +hash: 1aaf998ca41bbf291600272fff83e55b5e9536f817ab15aedb2fa9459b15336f +updated: 2019-10-07T15:28:28.103413125+02:00 +imports: +- name: github.com/gogo/protobuf + version: 65acae22fc9d1fe290b33faa2bd64cdc20a463a0 + subpackages: + - proto + - sortkeys +- name: github.com/google/gofuzz + version: f140a6486e521aad38f5917de355cbf147cc0496 +- name: github.com/openshift/api + version: d92789481b059132f1febccd9bb47cb27220f248 + subpackages: + - config/v1 +- name: golang.org/x/net + version: cdfb69ac37fc6fa907650654115ebebb3aae2087 + subpackages: + - http/httpguts + - http2 + - http2/hpack + - idna +- name: golang.org/x/text + version: e6919f6577db79269a6443b9dc46d18f2238fb5d + subpackages: + - secure/bidirule + - transform + - unicode/bidi + - unicode/norm +- name: gopkg.in/inf.v0 + version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: k8s.io/api + version: 95b840bb6a1f5f0462af804c8589396d294d4914 + subpackages: + - core/v1 +- name: k8s.io/apimachinery + version: 27d36303b6556f377b4f34e64705fa9024a12b0c + subpackages: + - pkg/api/resource + - pkg/apis/meta/v1 + - pkg/conversion + - pkg/conversion/queryparams + - pkg/fields + - pkg/labels + - pkg/runtime + - pkg/runtime/schema + - pkg/selection + - pkg/types + - pkg/util/errors + - pkg/util/intstr + - pkg/util/json + - pkg/util/naming + - pkg/util/net + - pkg/util/runtime + - pkg/util/sets + - pkg/util/validation + - 
pkg/util/validation/field + - pkg/watch + - third_party/forked/golang/reflect +- name: k8s.io/klog + version: 3ca30a56d8a775276f9cdae009ba326fdc05af7f +testImports: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.yaml new file mode 100644 index 00000000000..1c5768bcdf0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.yaml @@ -0,0 +1,4 @@ +package: github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries +import: +- package: github.com/openshift/api +- package: k8s.io/apimachinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml new file mode 100644 index 00000000000..c1a07112552 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myoperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: ^(test|TEST)$ +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch new file mode 100644 index 00000000000..b9f37c6e29a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch @@ -0,0 +1,6 @@ +spec: + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: "^(test|TEST)$" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml new file mode 100644 index 00000000000..05363164d38 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -0,0 +1,56 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myotheroperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" + validation: + openAPIV3Schema: + description: MyOtherOperatorResource is an example operator configuration type + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this 
representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + deprecatedField: + type: string + name: + type: string + overwritePattern: + pattern: ^(Managed|Unmanaged)$ + type: string + required: + - deprecatedField + - name + - overwritePattern + type: object + required: + - metadata + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch new file mode 100644 index 00000000000..1eebf507ecd --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + overwritePattern: + pattern: ^(Managed|Unmanaged)$ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec new file mode 100644 index 00000000000..fc4117e0b68 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec @@ -0,0 +1,47 @@ +#debuginfo not supported with Go +%global debug_package %{nil} +# modifying the Go binaries breaks the DWARF debugging +%global __os_install_post %{_rpmconfigdir}/brp-compress + +%global golang_version 1.12 +%global product_name OpenShift + +%{!?version: %global version 0.0.1} +%{!?release: %global release 1} + +Name: openshift +Version: %{version} +Release: %{release}%{dist} +Summary: OpenShift client binaries +License: ASL 2.0 +URL: https://%{go_package} + +# If go_arches not defined fall through to implicit golang archs +%if 0%{?go_arches:1} +ExclusiveArch: %{go_arches} +%else +ExclusiveArch: x86_64 aarch64 ppc64le s390x +%endif + +#BuildRequires: bsdtar +BuildRequires: golang >= %{golang_version} + +%description +%{summary} + +%prep + +%build +make build + +%install +install -d %{buildroot}%{_bindir} + +install -p -m 755 oc %{buildroot}%{_bindir}/oc +install -p -m 755 openshift %{buildroot}%{_bindir}/openshift + +%files +%{_bindir}/oc +%{_bindir}/openshift + +%changelog diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/doc.go new file mode 100644 index 00000000000..fc32adfc73e --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/doc.go @@ -0,0 +1,5 @@ +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/register.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/register.go new file mode 100644 index 00000000000..8b8850b1498 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/register.go @@ -0,0 +1,39 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &MyOperatorResource{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/types.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/types.go new file mode 100644 index 00000000000..d5a51243686 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1/types.go @@ -0,0 +1,25 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// MyOperatorResource is an example operator configuration type +type MyOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOperatorResourceSpec `json:"spec"` +} + +type MyOperatorResourceSpec struct { + Name string `json:"name"` +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/doc.go new file mode 100644 index 00000000000..ee7e1e72387 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/doc.go @@ -0,0 +1,5 @@ +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=operator.openshift.io +package v1beta1 diff --git 
a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/register.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/register.go new file mode 100644 index 00000000000..3ee83634cea --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/register.go @@ -0,0 +1,39 @@ +package v1beta1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &MyOtherOperatorResource{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go new file mode 100644 index 00000000000..9169f90a614 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go @@ -0,0 +1,28 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true + +// MyOtherOperatorResource is an example operator configuration type +type MyOtherOperatorResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec MyOtherOperatorResourceSpec `json:"spec"` +} + +type MyOtherOperatorResourceSpec struct { + Name string `json:"name"` + DeprecatedField string `json:"deprecatedField"` + + // +kubebuilder:validation:Pattern=^(Managed|Unmanaged)$ + OverwritePattern string `json:"overwritePattern"` +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go new file mode 100644 index 00000000000..4d118d3218c --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go @@ -0,0 +1,27 @@ +package version + +var ( + // commitFromGit is a constant representing the source version that + // generated this build. It should be set during build via -ldflags. 
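+	// An illustrative invocation (the real flags come from version-ldflags in
+	// alpha-build-machinery/make/lib/golang.mk, and <module> is a placeholder):
+	//   go build -ldflags '-X <module>/pkg/version.commitFromGit=abc1234' ...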
+ commitFromGit string + // versionFromGit is a constant representing the version tag that + // generated this build. It should be set during build via -ldflags. + versionFromGit = "unknown" + // major version + majorFromGit string + // minor version + minorFromGit string + // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + buildDate string + // state of git tree, either "clean" or "dirty" + gitTreeState string +) + +func String() string { + return majorFromGit + "\n" + + minorFromGit + "\n" + + commitFromGit + "\n" + + versionFromGit + "\n" + + gitTreeState + "\n" + + buildDate + "\n" +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml new file mode 100644 index 00000000000..f91f1f63e6b --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml @@ -0,0 +1,17 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myoperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch new file mode 100644 index 00000000000..b9f37c6e29a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch @@ -0,0 +1,6 @@ +spec: + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: "^(test|TEST)$" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml new file mode 100644 index 00000000000..fe529245e94 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myotheroperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git 
a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch new file mode 100644 index 00000000000..1eebf507ecd --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + overwritePattern: + pattern: ^(Managed|Unmanaged)$ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml new file mode 100644 index 00000000000..c1a07112552 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml @@ -0,0 +1,22 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myoperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOperatorResource + plural: myoperatorresources + scope: "" + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: ^(test|TEST)$ +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch new file mode 100644 index 00000000000..b9f37c6e29a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch @@ -0,0 +1,6 @@ +spec: + validation: + openAPIV3Schema: + properties: + apiVersion: + pattern: "^(test|TEST)$" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml new file mode 100644 index 00000000000..05363164d38 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -0,0 +1,56 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: myotheroperatorresources.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MyOtherOperatorResource + plural: myotheroperatorresources + scope: "" + validation: + openAPIV3Schema: + 
description: MyOtherOperatorResource is an example operator configuration type + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + deprecatedField: + type: string + name: + type: string + overwritePattern: + pattern: ^(Managed|Unmanaged)$ + type: string + required: + - deprecatedField + - name + - overwritePattern + type: object + required: + - metadata + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch new file mode 100644 index 00000000000..1eebf507ecd --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + overwritePattern: + pattern: ^(Managed|Unmanaged)$ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk new file mode 100644 index 00000000000..aba2c489032 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk @@ -0,0 +1,14 @@ +all: build +.PHONY: all + + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/... 
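+# Test packages can be scoped the same way; for instance, the library-go
+# Makefile sets GO_TEST_PACKAGES :=./test/e2e-encryption/... for its e2e
+# target (shown here purely as an illustration).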
+# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./golang.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log new file mode 100644 index 00000000000..a5cc906ddf6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.example.mk.help.log @@ -0,0 +1,14 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +test +test-unit +update +update-gofmt +verify +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk new file mode 100644 index 00000000000..15a0b49bcb0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/golang.mk @@ -0,0 +1,28 @@ +all: build +.PHONY: all + +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + + +verify: verify-gofmt +verify: verify-govet +.PHONY: verify + +update: update-gofmt +.PHONY: update + + +test: test-unit +.PHONY: test + +clean: clean-binaries +.PHONY: clean + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + targets/help.mk \ + targets/golang/*.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/doc.go new file mode 100644 index 00000000000..66ba5512e1f --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk new file mode 100644 index 00000000000..89d457ef0e9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk @@ -0,0 +1,39 @@ +GO ?=go +GOPATH ?=$(shell $(GO) env GOPATH) +GO_PACKAGE ?=$(shell $(GO) list -e -f '{{ .ImportPath }}' . || echo 'no_package_detected') + +GOOS ?=$(shell $(GO) env GOOS) +GOHOSTOS ?=$(shell $(GO) env GOHOSTOS) +GOARCH ?=$(shell $(GO) env GOARCH) +GOHOSTARCH ?=$(shell $(GO) env GOHOSTARCH) +GOEXE ?=$(shell $(GO) env GOEXE) + +GOFMT ?=gofmt +GOFMT_FLAGS ?=-s -l +GOLINT ?=golint + +GO_FILES ?=$(shell find . -name '*.go' -not -path '*/vendor/*' -not -path '*/_output/*' -print) +GO_PACKAGES ?=./... +GO_TEST_PACKAGES ?=$(GO_PACKAGES) + +GO_BUILD_PACKAGES ?=./cmd/... +GO_BUILD_PACKAGES_EXPANDED ?=$(shell $(GO) list $(GO_BUILD_PACKAGES)) +go_build_binaries =$(notdir $(GO_BUILD_PACKAGES_EXPANDED)) +GO_BUILD_FLAGS ?= +GO_BUILD_BINDIR ?= + +GO_TEST_FLAGS ?=-race + +GO_LD_EXTRAFLAGS ?= + +SOURCE_GIT_TAG ?=$(shell git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown') +SOURCE_GIT_COMMIT ?=$(shell git rev-parse --short "HEAD^{commit}" 2>/dev/null) +SOURCE_GIT_TREE_STATE ?=$(shell ( ( [ ! 
-d ".git/" ] || git diff --quiet ) && echo 'clean' ) || echo 'dirty') + +define version-ldflags +-X $(1).versionFromGit="$(SOURCE_GIT_TAG)" \ +-X $(1).commitFromGit="$(SOURCE_GIT_COMMIT)" \ +-X $(1).gitTreeState="$(SOURCE_GIT_TREE_STATE)" \ +-X $(1).buildDate="$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')" +endef +GO_LD_FLAGS ?=-ldflags "-s -w $(call version-ldflags,$(GO_PACKAGE)/pkg/version) $(GO_LD_EXTRAFLAGS)" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/tmp.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/tmp.mk new file mode 100644 index 00000000000..a0fb655359a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/tmp.mk @@ -0,0 +1,2 @@ +PERMANENT_TMP :=_output +PERMANENT_TMP_GOPATH :=$(PERMANENT_TMP)/tools diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk new file mode 100644 index 00000000000..7e6ff98d56a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk @@ -0,0 +1,42 @@ +all: build +.PHONY: all + + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/... +# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./operator.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` + + +# Codegen module needs setting these required variables +CODEGEN_OUTPUT_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/generated +CODEGEN_API_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/apis +CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1 +# You can list all codegen related variables by: +# $ make -n --print-data-base | grep ^CODEGEN + +# This will call a macro called "build-image" which will generate image specific targets based on the parameters: +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context +# It will generate target "image-$(1)" for builing the image an binding it as a prerequisite to target "images". +$(call build-image,ocp-openshift-apiserver-operator,registry.svc.ci.openshift.org/ocp/4.2:openshift-apiserver-operator,./Dockerfile.rhel,.) + +# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters: +# $0 - macro name +# $1 - target suffix +# $2 - input dirs +# $3 - prefix +# $4 - pkg +# $5 - output +# It will generate targets {update,verify}-bindata-$(1) logically grouping them in unsuffixed versions of these targets +# and also hooked into {update,verify}-generated for broader integration. 
+$(call add-bindata,v3.11.0,./bindata/v3.11.0/...,bindata,v311_00_assets,pkg/operator/v311_00_assets/bindata.go) + diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log new file mode 100644 index 00000000000..a1489d212ba --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.example.mk.help.log @@ -0,0 +1,25 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +image-ocp-openshift-apiserver-operator +images +test +test-unit +update +update-bindata +update-codegen +update-deps +update-deps-overrides +update-generated +update-gofmt +verify +verify-bindata +verify-codegen +verify-deps +verify-generated +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk new file mode 100644 index 00000000000..d763df46176 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/operator.mk @@ -0,0 +1,11 @@ +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + default.mk \ + targets/openshift/operator/*.mk \ +) + diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/doc.go new file mode 100644 index 00000000000..66ba5512e1f --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk new file mode 100644 index 00000000000..07c15faa42b --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk @@ -0,0 +1,28 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +define build-package + $(if $(GO_BUILD_BINDIR),mkdir -p '$(GO_BUILD_BINDIR)',) + $(strip $(GO) build $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) \ + $(if $(GO_BUILD_BINDIR),-o '$(GO_BUILD_BINDIR)/$(notdir $(1))$(GOEXE)',) \ + $(1)) + +endef + +# We need to build each package separately so go build creates appropriate binaries +build: + $(foreach package,$(GO_BUILD_PACKAGES_EXPANDED),$(call build-package,$(package))) +.PHONY: build + +clean-binaries: + $(RM) $(go_build_binaries) +.PHONY: clean-binaries + +clean: clean-binaries +.PHONY: clean + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
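+# ($(addprefix ...) on the include line below is expanded as soon as make reads it,
+# so the paths resolve against this file even if an included file resets self_dir.)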
+include $(addprefix $(self_dir), \
+	../../lib/golang.mk \
+)
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/doc.go
new file mode 100644
index 00000000000..66ba5512e1f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/doc.go
@@ -0,0 +1,3 @@
+// required for gomod to pull in packages.
+
+package alpha_build_machinery
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk
new file mode 100644
index 00000000000..908783aac03
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk
@@ -0,0 +1,19 @@
+self_dir :=$(dir $(lastword $(MAKEFILE_LIST)))
+
+test-unit:
+ifndef JUNITFILE
+	$(GO) test $(GO_TEST_FLAGS) $(GO_TEST_PACKAGES)
+else
+ifeq (, $(shell which gotest2junit 2>/dev/null))
+	$(error gotest2junit not found! Get it by `go get -u github.com/openshift/release/tools/gotest2junit`.)
+endif
+	set -o pipefail; $(GO) test $(GO_TEST_FLAGS) -json $(GO_TEST_PACKAGES) | gotest2junit > $(JUNITFILE)
+endif
+.PHONY: test-unit
+
+# We need to be careful to expand all the paths before any include is done
+# or self_dir could be modified for the next include by the included file.
+# Also doing this at the end of the file allows us to use self_dir before it could be modified.
+include $(addprefix $(self_dir), \
+	../../lib/golang.mk \
+)
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk
new file mode 100644
index 00000000000..78b2783bafc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk
@@ -0,0 +1,34 @@
+self_dir :=$(dir $(lastword $(MAKEFILE_LIST)))
+
+go_files_count :=$(words $(GO_FILES))
+
+verify-gofmt:
+	$(info Running `$(GOFMT) $(GOFMT_FLAGS)` on $(go_files_count) file(s).)
+	@TMP=$$( mktemp ); \
+	$(GOFMT) $(GOFMT_FLAGS) $(GO_FILES) | tee $${TMP}; \
+	if [ -s $${TMP} ]; then \
+		echo "$@ failed - please run \`make update-gofmt\`"; \
+		exit 1; \
+	fi;
+.PHONY: verify-gofmt
+
+update-gofmt:
+	$(info Running `$(GOFMT) $(GOFMT_FLAGS) -w` on $(go_files_count) file(s).)
+	@$(GOFMT) $(GOFMT_FLAGS) -w $(GO_FILES)
+.PHONY: update-gofmt
+
+
+verify-govet:
+	$(GO) vet $(GO_PACKAGES)
+.PHONY: verify-govet
+
+verify-golint:
+	$(GOLINT) $(GO_PACKAGES)
+.PHONY: verify-golint
+
+# We need to be careful to expand all the paths before any include is done
+# or self_dir could be modified for the next include by the included file.
+# Also doing this at the end of the file allows us to use self_dir before it could be modified.
+include $(addprefix $(self_dir), \
+	../../lib/golang.mk \
+)
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk
new file mode 100644
index 00000000000..55bfbac094e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/help.mk
@@ -0,0 +1,6 @@
+help:
+	$(info The following make targets are available:)
+	@$(MAKE) -f $(firstword $(MAKEFILE_LIST)) --print-data-base --question no-such-target 2>&1 | grep -v 'no-such-target' | \
+	grep -v -e '^no-such-target' -e '^makefile' | \
+	awk '/^[^.%][-A-Za-z0-9_]*:/ { print substr($$1, 1, length($$1)-1) }' | sort -u
+.PHONY: help
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk
new file mode 100644
index 00000000000..97b7bd36d36
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk
@@ -0,0 +1,65 @@
+TMP_GOPATH :=$(shell mktemp -d)
+
+
+.ensure-go-bindata:
+	ln -s $(abspath ./vendor) "$(TMP_GOPATH)/src"
+	export GO111MODULE=off && export GOPATH=$(TMP_GOPATH) && export GOBIN=$(TMP_GOPATH)/bin && go install "./vendor/github.com/jteeuwen/go-bindata/..."
+
+# $1 - input dirs
+# $2 - prefix
+# $3 - pkg
+# $4 - output
+# $5 - output prefix
+define run-bindata
+	$(TMP_GOPATH)/bin/go-bindata -nocompress -nometadata \
+		-prefix "$(2)" \
+		-pkg "$(3)" \
+		-o "$(5)$(4)" \
+		-ignore "OWNERS" \
+		$(1) && \
+	gofmt -s -w "$(5)$(4)"
+endef
+
+# $1 - name
+# $2 - input dirs
+# $3 - prefix
+# $4 - pkg
+# $5 - output
+define add-bindata-internal
+update-bindata-$(1): .ensure-go-bindata
+	$(call run-bindata,$(2),$(3),$(4),$(5),)
+.PHONY: update-bindata-$(1)
+
+update-bindata: update-bindata-$(1)
+.PHONY: update-bindata
+
+
+verify-bindata-$(1): .ensure-go-bindata
+verify-bindata-$(1): TMP_DIR := $$(shell mktemp -d)
+verify-bindata-$(1):
+	$(call run-bindata,$(2),$(3),$(4),$(5),$$(TMP_DIR)/) && \
+	diff -Naup {.,$$(TMP_DIR)}/$(5)
+.PHONY: verify-bindata-$(1)
+
+verify-bindata: verify-bindata-$(1)
+.PHONY: verify-bindata
+endef
+
+
+update-generated: update-bindata
+.PHONY: update-generated
+
+update: update-generated
+.PHONY: update
+
+
+verify-generated: verify-bindata
+.PHONY: verify-generated
+
+verify: verify-generated
+.PHONY: verify
+
+
+define add-bindata
+$(eval $(call add-bindata-internal,$(1),$(2),$(3),$(4),$(5)))
+endef
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk
new file mode 100644
index 00000000000..247de9417ca
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/codegen.mk
@@ -0,0 +1,41 @@
+CODEGEN_PKG ?=./vendor/k8s.io/code-generator/
+CODEGEN_GENERATORS ?=all
+CODEGEN_OUTPUT_BASE ?=../../..
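+# CODEGEN_GO_HEADER_FILE below defaults to /dev/null, i.e. no license header;
+# repos that stamp one can point it at their boilerplate.go.txt instead
+# (a hypothetical path).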
+CODEGEN_GO_HEADER_FILE ?=/dev/null + +CODEGEN_API_PACKAGE ?=$(error CODEGEN_API_PACKAGE is required) +CODEGEN_GROUPS_VERSION ?=$(error CODEGEN_GROUPS_VERSION is required) +CODEGEN_OUTPUT_PACKAGE ?=$(error CODEGEN_OUTPUT_PACKAGE is required) + +define run-codegen +$(CODEGEN_PKG)/generate-groups.sh \ + "$(CODEGEN_GENERATORS)" \ + "$(CODEGEN_OUTPUT_PACKAGE)" \ + "$(CODEGEN_API_PACKAGE)" \ + "$(CODEGEN_GROUPS_VERSION)" \ + --output-base $(CODEGEN_OUTPUT_BASE) \ + --go-header-file $(CODEGEN_GO_HEADER_FILE) \ + $1 +endef + + +verify-codegen: + $(call run-codegen,--verify-only) +.PHONY: verify-codegen + +verify-generated: verify-codegen +.PHONY: verify-generated + +verify: verify-generated +.PHONY: verify + + +update-codegen: + $(call run-codegen) +.PHONY: update-codegen + +update-generated: update-codegen +.PHONY: update-generated + +update: update-generated +.PHONY: update diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk new file mode 100644 index 00000000000..eeb29d7d356 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk @@ -0,0 +1,31 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +CONTROLLER_GEN_VERSION ?=v0.2.1-37-ga3cca5d +CONTROLLER_GEN ?=$(PERMANENT_TMP_GOPATH)/bin/controller-gen +controller_gen_dir :=$(dir $(CONTROLLER_GEN)) + +ensure-controller-gen: +ifeq "" "$(wildcard $(CONTROLLER_GEN))" + $(info Installing controller-gen into '$(CONTROLLER_GEN)') + mkdir -p '$(controller_gen_dir)' + curl -s -f -L https://github.com/openshift/kubernetes-sigs-controller-tools/releases/download/$(CONTROLLER_GEN_VERSION)/controller-gen-$(GOHOSTOS)-$(GOHOSTARCH) -o '$(CONTROLLER_GEN)' + chmod +x '$(CONTROLLER_GEN)'; +else + $(info Using existing controller-gen from "$(CONTROLLER_GEN)") +endif +.PHONY: ensure-controller-gen + +clean-controller-gen: + $(RM) '$(CONTROLLER_GEN)' + if [ -d '$(controller_gen_dir)' ]; then rmdir --ignore-fail-on-non-empty -p '$(controller_gen_dir)'; fi +.PHONY: clean-controller-gen + +clean: clean-controller-gen + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
+include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk new file mode 100644 index 00000000000..2e152fd65ec --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk @@ -0,0 +1,80 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +# $1 - crd file +# $2 - patch file +define patch-crd + $(YQ) m -i -x '$(1)' '$(2)' + +endef + +empty := + +define diff-file + diff -Naup '$(1)' '$(2)' + +endef + +# $1 - apis +# $2 - manifests +# $3 - output +define run-crd-gen + '$(CONTROLLER_GEN)' \ + schemapatch:manifests="$(2)" \ + paths="$(subst $(empty) ,;,$(1))" \ + output:dir="$(3)" + $$(foreach p,$$(wildcard $(2)/*.crd.yaml-merge-patch),$$(call patch-crd,$$(subst $(2),$(3),$$(basename $$(p))).yaml,$$(p))) +endef + + +# $1 - target name +# $2 - apis +# $3 - manifests +# $4 - output +define add-crd-gen-internal + +update-codegen-crds-$(1): ensure-controller-gen ensure-yq + $(call run-crd-gen,$(2),$(3),$(4)) +.PHONY: update-codegen-crds-$(1) + +update-codegen-crds: update-codegen-crds-$(1) +.PHONY: update-codegen-crds + +verify-codegen-crds-$(1): VERIFY_CODEGEN_CRD_TMP_DIR:=$(shell mktemp -d) +verify-codegen-crds-$(1): ensure-controller-gen ensure-yq + $(call run-crd-gen,$(2),$(3),$$(VERIFY_CODEGEN_CRD_TMP_DIR)) + $$(foreach p,$$(wildcard $(3)/*.crd.yaml),$$(call diff-file,$$(p),$$(subst $(3),$$(VERIFY_CODEGEN_CRD_TMP_DIR),$$(p)))) +.PHONY: verify-codegen-crds-$(1) + +verify-codegen-crds: verify-codegen-crds-$(1) +.PHONY: verify-codegen-crds + +endef + + +update-generated: update-codegen-crds +.PHONY: update-generated + +update: update-generated +.PHONY: update + +verify-generated: verify-codegen-crds +.PHONY: verify-generated + +verify: verify-generated +.PHONY: verify + + +define add-crd-gen +$(eval $(call add-crd-gen-internal,$(1),$(2),$(3),$(4))) +endef + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
+include $(addprefix $(self_dir), \
+	../../lib/golang.mk \
+	../../lib/tmp.mk \
+	../../targets/openshift/controller-gen.mk \
+	../../targets/openshift/yq.mk \
+)
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk
new file mode 100644
index 00000000000..fafa8f9dada
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/deps.mk
@@ -0,0 +1,35 @@
+self_dir :=$(dir $(lastword $(MAKEFILE_LIST)))
+scripts_dir :=$(self_dir)/../../../scripts
+
+# We need to force the locale so different envs sort files the same way for recursive traversals
+deps_diff :=LC_COLLATE=C diff --no-dereference -N
+
+update-deps:
+	$(scripts_dir)/$@.sh
+.PHONY: update-deps
+
+# $1 - temporary directory to restore the vendor dependencies from glide.lock into
+define restore-deps
+	ln -s $(abspath ./) "$(1)"/current
+	cp -R -H ./ "$(1)"/updated
+	$(RM) -r "$(1)"/updated/vendor
+	cd "$(1)"/updated && glide install --strip-vendor && find ./vendor -name '.hg_archival.txt' -delete
+	cd "$(1)" && $(deps_diff) -r {current,updated}/vendor/ > updated/glide.diff || true
+endef
+
+verify-deps: tmp_dir:=$(shell mktemp -d)
+verify-deps:
+	$(call restore-deps,$(tmp_dir))
+	@echo $(deps_diff) '$(tmp_dir)'/{current,updated}/glide.diff
+	@ $(deps_diff) '$(tmp_dir)'/{current,updated}/glide.diff || ( \
+		echo "ERROR: Content of 'vendor/' directory doesn't match 'glide.lock' and the overrides in 'glide.diff'!" && \
+		echo "If this is an intentional change (a carry patch), please update the 'glide.diff' using 'make update-deps-overrides'." && \
+		exit 1 \
+	)
+.PHONY: verify-deps
+
+update-deps-overrides: tmp_dir:=$(shell mktemp -d)
+update-deps-overrides:
+	$(call restore-deps,$(tmp_dir))
+	cp "$(tmp_dir)"/{updated,current}/glide.diff
+.PHONY: update-deps-overrides
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/doc.go
new file mode 100644
index 00000000000..66ba5512e1f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/doc.go
@@ -0,0 +1,3 @@
+// required for gomod to pull in packages.
+
+package alpha_build_machinery
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk
new file mode 100644
index 00000000000..00e76ac2613
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk
@@ -0,0 +1,29 @@
+# IMAGE_BUILD_EXTRA_FLAGS lets you add extra flags for imagebuilder
+# e.g. to mount secrets and repo information into the base image, like:
+#  make images IMAGE_BUILD_EXTRA_FLAGS='-mount ~/projects/origin-repos/4.2/:/etc/yum.repos.d/'
+IMAGE_BUILD_DEFAULT_FLAGS ?=--allow-pull
+IMAGE_BUILD_EXTRA_FLAGS ?=
+
+# $1 - target name
+# $2 - image ref
+# $3 - Dockerfile path
+# $4 - context
+define build-image-internal
+image-$(1):
+	$(strip \
+		imagebuilder \
+		$(IMAGE_BUILD_DEFAULT_FLAGS) \
+		-t $(2) \
+		-f $(3) \
+		$(IMAGE_BUILD_EXTRA_FLAGS) \
+		$(4) \
+	)
+.PHONY: image-$(1)
+
+images: image-$(1)
+.PHONY: images
+endef
+
+define build-image
+$(eval $(call build-image-internal,$(1),$(2),$(3),$(4)))
+endef
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/doc.go
new file mode 100644
index 00000000000..66ba5512e1f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/doc.go
@@ -0,0 +1,3 @@
+// required for gomod to pull in packages.
+
+package alpha_build_machinery
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk
new file mode 100644
index 00000000000..07fc5605a84
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator/release.mk
@@ -0,0 +1,7 @@
+# If we need unified behaviour specific to operators, this folder is the place.
+
+# It seems that our previous origin-release jq-based replacement is supposed to be handled
+# by `oc adm release new`, so we might drop this target.
+#origin-release:
+#	$(error Not implemented.)
+#.PHONY: origin-release
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk
new file mode 100644
index 00000000000..3f20bb1ca9d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk
@@ -0,0 +1,43 @@
+self_dir :=$(dir $(lastword $(MAKEFILE_LIST)))
+
+RPM_OUTPUT_DIR ?=_output
+RPM_TOPDIR ?=$(abspath ./)
+RPM_BUILDDIR ?=$(RPM_TOPDIR)
+RPM_BUILDROOT ?=$(RPM_TOPDIR)
+RPM_SOURCEDIR ?=$(RPM_TOPDIR)
+RPM_SPECDIR ?=$(RPM_TOPDIR)
+RPM_RPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/rpms
+RPM_SRCRPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/srpms
+
+RPM_SPECFILES ?=$(wildcard *.spec)
+RPM_BUILDFLAGS ?=-ba
+RPM_EXTRAFLAGS ?=
+
+rpm-build:
+	$(strip \
+		rpmbuild $(RPM_BUILDFLAGS) \
+		--define "_topdir $(RPM_TOPDIR)" \
+		--define "_builddir $(RPM_BUILDDIR)" \
+		--define "_buildrootdir $(RPM_BUILDROOT)" \
+		--define "_rpmdir $(RPM_RPMDIR)" \
+		--define "_srcrpmdir $(RPM_SRCRPMDIR)" \
+		--define "_specdir $(RPM_SPECDIR)" \
+		--define "_sourcedir $(RPM_SOURCEDIR)" \
+		--define "go_package $(GO_PACKAGE)" \
+		$(RPM_EXTRAFLAGS) \
+		$(RPM_SPECFILES) \
+	)
+
+clean-rpms:
+	$(RM) -r '$(RPM_RPMDIR)' '$(RPM_SRCRPMDIR)'
+	if [ -d '$(RPM_OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(RPM_OUTPUT_DIR)'; fi
+.PHONY: clean-rpms
+
+clean: clean-rpms
+
+# We need to be careful to expand all the paths before any include is done
+# or self_dir could be modified for the next include by the included file.
+# Also doing this at the end of the file allows us to use self_dir before it could be modified.
+include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk new file mode 100644 index 00000000000..2679e3add39 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk @@ -0,0 +1,32 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +YQ ?=$(PERMANENT_TMP_GOPATH)/bin/yq +yq_dir :=$(dir $(YQ)) + + +ensure-yq: +ifeq "" "$(wildcard $(YQ))" + $(info Installing yq into '$(YQ)') + mkdir -p '$(yq_dir)' + curl -s -f -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_$(GOHOSTOS)_$(GOHOSTARCH) -o '$(YQ)' + chmod +x '$(YQ)'; +else + $(info Using existing yq from "$(YQ)") +endif +.PHONY: ensure-yq + +clean-yq: + $(RM) '$(YQ)' + if [ -d '$(yq_dir)' ]; then rmdir --ignore-fail-on-non-empty -p '$(yq_dir)'; fi +.PHONY: clean-yq + +clean: clean-yq + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ +) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/doc.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/doc.go new file mode 100644 index 00000000000..66ba5512e1f --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh new file mode 100755 index 00000000000..46812e939c2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/scripts/update-deps.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e + +readonly GLIDE_MINOR_VERSION="13" +readonly REQUIRED_GLIDE_VERSION="0.$GLIDE_MINOR_VERSION" + +function verify_glide_version() { + if ! command -v glide &> /dev/null; then + echo "[FATAL] Glide was not found in \$PATH. Please install version ${REQUIRED_GLIDE_VERSION} or newer." + exit 1 + fi + + local glide_version + glide_version=($(glide --version)) + if ! echo "${glide_version[2]#v}" | awk -F. -v min=$GLIDE_MINOR_VERSION '{ exit $2 < min }'; then + echo "Detected glide version: ${glide_version[*]}." + echo "Please install Glide version ${REQUIRED_GLIDE_VERSION} or newer." 
+ exit 1 + fi +} + +verify_glide_version + +glide update --strip-vendor + +# glide doesn't handle mercurial properly and leaves internal files (equivalent of .git/) laying around +# Given those files differ by mercurial version it was cloned with, verify-deps would break +find ./vendor -name '.hg_archival.txt' -delete diff --git a/vendor/github.com/openshift/library-go/glide.lock b/vendor/github.com/openshift/library-go/glide.lock new file mode 100644 index 00000000000..f7f48fe19a1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/glide.lock @@ -0,0 +1,1013 @@ +hash: 3095b969974ba27081bfe48993b336f7defa680c29541c553144eb955cfe43da +updated: 2019-11-01T10:28:41.529138+01:00 +imports: +- name: bitbucket.org/ww/goautoneg + version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 + vcs: hg +- name: github.com/Azure/go-ansiterm + version: d6e3b3328b783f23731bc4d058875b0371ff8109 + subpackages: + - winterm +- name: github.com/beorn7/perks + version: 3a771d992973f24aa725d07868b467d1ddfceafb + subpackages: + - quantile +- name: github.com/blang/semver + version: b38d23b8782a487059e8fc8773e9a5b228a77cb6 +- name: github.com/certifi/gocertifi + version: ee1a9a0726d2ae45f54118cac878c990d4016ded +- name: github.com/containerd/continuity + version: 75bee3e2ccb6402e3a986ab8bd3b17003fc0fdec + subpackages: + - fs + - pathdriver + - syscallx + - sysx +- name: github.com/coreos/etcd + version: 94745a4eed0425653b3b4275a208d38babceeaec + subpackages: + - auth/authpb + - clientv3 + - clientv3/balancer + - clientv3/balancer/connectivity + - clientv3/balancer/picker + - clientv3/balancer/resolver/endpoint + - clientv3/credentials + - etcdserver/api/v3rpc/rpctypes + - etcdserver/etcdserverpb + - mvcc/mvccpb + - pkg/logutil + - pkg/systemd + - pkg/tlsutil + - pkg/transport + - pkg/types + - raft + - raft/raftpb +- name: github.com/coreos/go-systemd + version: 39ca1b05acc7ad1220e09f133283b8859a8b71ab + subpackages: + - daemon + - journal +- name: github.com/coreos/pkg + version: 97fdf19511ea361ae1c100dd393cc47f8dcfa1e1 + subpackages: + - capnslog +- name: github.com/davecgh/go-spew + version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 + subpackages: + - spew +- name: github.com/docker/distribution + version: 16128bbac47f75050e82f7e91b04df33775e0c23 + subpackages: + - digestset + - manifest + - manifest/schema1 + - manifest/schema2 + - metrics + - reference + - registry/api/errcode + - registry/api/v2 + - registry/client + - registry/client/auth + - registry/client/auth/challenge + - registry/client/transport + - registry/storage/cache + - registry/storage/cache/memory +- name: github.com/docker/docker + version: be7ac8be2ae072032a4005e8f232be3fc57e4127 + subpackages: + - api/types + - api/types/blkiodev + - api/types/container + - api/types/filters + - api/types/mount + - api/types/network + - api/types/registry + - api/types/strslice + - api/types/swarm + - api/types/swarm/runtime + - api/types/versions + - errdefs + - opts + - pkg/archive + - pkg/fileutils + - pkg/homedir + - pkg/idtools + - pkg/ioutils + - pkg/jsonmessage + - pkg/longpath + - pkg/mount + - pkg/pools + - pkg/stdcopy + - pkg/system + - pkg/term + - pkg/term/windows +- name: github.com/docker/go-connections + version: 3ede32e2033de7505e6500d6c868c2b9ed9f169d + subpackages: + - nat +- name: github.com/docker/go-metrics + version: b619b3592b65de4f087d9f16863a7e6ff905973c +- name: github.com/docker/go-units + version: 47565b4f722fb6ceae66b95f853feed578a4a51c +- name: github.com/docker/libnetwork + version: 7f13a5c99f4bb76a4122035d495984b6a09739bb + 
subpackages: + - ipamutils +- name: github.com/docker/libtrust + version: aabc10ec26b754e797f9028f4589c5b7bd90dc20 +- name: github.com/emicklei/go-restful + version: ff4f55a206334ef123e4f79bbf348980da81ca46 + subpackages: + - log +- name: github.com/evanphx/json-patch + version: 5858425f75500d40c52783dce87d085a483ce135 +- name: github.com/fsouza/go-dockerclient + version: da3951ba2e9e02bc0e7642150b3e265aed7e1df3 +- name: github.com/getsentry/raven-go + version: c977f96e109525a5d8fa10a19165341f601f38b0 +- name: github.com/ghodss/yaml + version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee +- name: github.com/go-openapi/jsonpointer + version: 46af16f9f7b149af66e5d1bd010e3574dc06de98 +- name: github.com/go-openapi/jsonreference + version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272 +- name: github.com/go-openapi/spec + version: 6aced65f8501fe1217321abf0749d354824ba2ff +- name: github.com/go-openapi/swag + version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72 +- name: github.com/gogo/protobuf + version: 65acae22fc9d1fe290b33faa2bd64cdc20a463a0 + subpackages: + - gogoproto + - proto + - protoc-gen-gogo/descriptor + - sortkeys +- name: github.com/golang/groupcache + version: 02826c3e79038b59d737d3b1c0a1d937f71a4433 + subpackages: + - lru +- name: github.com/golang/protobuf + version: b5d812f8a3706043e23a9cd5babf2e5423744d30 + subpackages: + - proto + - protoc-gen-go/descriptor + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: github.com/gonum/blas + version: f22b278b28ac9805aadd613a754a60c35b24ae69 + subpackages: + - blas64 + - native + - native/internal/math32 +- name: github.com/gonum/floats + version: c233463c7e827fd71a8cdb62dfda0e98f7c39ad5 +- name: github.com/gonum/graph + version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 + subpackages: + - encoding/dot + - formats/dot + - formats/dot/ast + - formats/dot/internal/astx + - formats/dot/internal/errors + - formats/dot/internal/lexer + - formats/dot/internal/parser + - formats/dot/internal/token + - internal/ordered + - simple +- name: github.com/gonum/internal + version: f884aa71402950fb2796dbea0d5aa9ef9cfad8ca + subpackages: + - asm/f32 + - asm/f64 +- name: github.com/gonum/lapack + version: e4cdc5a0bff924bb10be88482e635bd40429f65e + subpackages: + - lapack64 + - native +- name: github.com/gonum/matrix + version: c518dec07be9a636c38a4650e217be059b5952ec + subpackages: + - mat64 +- name: github.com/google/go-cmp + version: 6f77996f0c42f7b84e5a2b252227263f93432e9b + subpackages: + - cmp + - cmp/internal/diff + - cmp/internal/flags + - cmp/internal/function + - cmp/internal/value +- name: github.com/google/gofuzz + version: f140a6486e521aad38f5917de355cbf147cc0496 +- name: github.com/google/uuid + version: 0cd6bf5da1e1c83f8b45653022c74f71af0538a4 +- name: github.com/googleapis/gnostic + version: 0c5108395e2debce0d731cf0287ddf7242066aba + subpackages: + - OpenAPIv2 + - compiler + - extensions +- name: github.com/gorilla/mux + version: f395758b854c4efa789b8c1e9b73479704c548cb +- name: github.com/grpc-ecosystem/go-grpc-prometheus + version: c225b8c3b01faf2899099b768856a9e916e5087b +- name: github.com/hashicorp/golang-lru + version: 7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c + subpackages: + - simplelru +- name: github.com/imdario/mergo + version: 9316a62528ac99aaecb4e47eadd6dc8aa6533d58 +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/json-iterator/go + version: 27518f6661eba504be5a7a9a9f6d9460d892ade3 +- name: github.com/jteeuwen/go-bindata + version: 
a0ff2567cfb70903282db057e799fd826784d41d +- name: github.com/konsorten/go-windows-terminal-sequences + version: 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 +- name: github.com/kubernetes-sigs/kube-storage-version-migrator + version: bdca3bf7d454105b7853d0d60b385877486a9dc6 + repo: https://github.com/openshift/kubernetes-kube-storage-version-migrator.git + subpackages: + - pkg/apis/migration/v1alpha1 + - pkg/clients/clientset + - pkg/clients/clientset/scheme + - pkg/clients/clientset/typed/migration/v1alpha1 + - pkg/clients/informer/internalinterfaces + - pkg/clients/informer/migration/v1alpha1 + - pkg/clients/lister/migration/v1alpha1 +- name: github.com/mailru/easyjson + version: d5b7844b561a7bc640052f1b935f7b800330d7e0 + subpackages: + - buffer + - jlexer + - jwriter +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil +- name: github.com/Microsoft/go-winio + version: 97e4973ce50b2ff5f09635a57e2b88a037aae829 +- name: github.com/modern-go/concurrent + version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 +- name: github.com/modern-go/reflect2 + version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 +- name: github.com/morikuni/aec + version: 39771216ff4c63d11f5e604076f9c45e8be1067b +- name: github.com/munnerz/goautoneg + version: a547fc61f48d567d5b4ec6f8aee5573d8efce11d +- name: github.com/NYTimes/gziphandler + version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb +- name: github.com/opencontainers/go-digest + version: 279bed98673dd5bef374d3b6e4b09e2af76183bf +- name: github.com/opencontainers/image-spec + version: d60099175f88c47cd379c4738d158884749ed235 + subpackages: + - specs-go + - specs-go/v1 +- name: github.com/opencontainers/runc + version: b133feaeeb2e69ba94aa95eac3a455a143435ea9 + subpackages: + - libcontainer/system + - libcontainer/user +- name: github.com/openshift/api + version: bac53ab1eb76f33fe58ad587f6894bd84cc6c5ef + subpackages: + - apps + - apps/v1 + - authorization + - authorization/v1 + - build + - build/v1 + - config + - config/v1 + - image + - image/docker10 + - image/dockerpre012 + - image/v1 + - kubecontrolplane + - kubecontrolplane/v1 + - legacyconfig/v1 + - network + - network/v1 + - oauth + - oauth/v1 + - openshiftcontrolplane + - openshiftcontrolplane/v1 + - operator + - operator/v1 + - operator/v1alpha1 + - osin + - osin/v1 + - pkg/serialization + - project + - project/v1 + - quota + - quota/v1 + - route + - route/v1 + - security + - security/v1 + - servicecertsigner + - servicecertsigner/v1alpha1 + - template + - template/v1 + - unidling/v1alpha1 + - user + - user/v1 +- name: github.com/openshift/client-go + version: 2823239d2298214509c3536714f684101799e81b + subpackages: + - apps/clientset/versioned/scheme + - apps/clientset/versioned/typed/apps/v1 + - config/clientset/versioned + - config/clientset/versioned/fake + - config/clientset/versioned/scheme + - config/clientset/versioned/typed/config/v1 + - config/clientset/versioned/typed/config/v1/fake + - config/informers/externalversions + - config/informers/externalversions/config + - config/informers/externalversions/config/v1 + - config/informers/externalversions/internalinterfaces + - config/listers/config/v1 + - quota/clientset/versioned + - quota/clientset/versioned/fake + - quota/clientset/versioned/scheme + - quota/clientset/versioned/typed/quota/v1 + - quota/clientset/versioned/typed/quota/v1/fake + - quota/informers/externalversions + - quota/informers/externalversions/internalinterfaces + - quota/informers/externalversions/quota + 
- quota/informers/externalversions/quota/v1 + - quota/listers/quota/v1 + - route/clientset/versioned + - route/clientset/versioned/fake + - route/clientset/versioned/scheme + - route/clientset/versioned/typed/route/v1 + - route/clientset/versioned/typed/route/v1/fake +- name: github.com/pborman/uuid + version: 8b1b92947f46224e3b97bb1a3a5b0382be00d31e +- name: github.com/pkg/errors + version: 645ef00459ed84a119197bfb8d8205042c6df63d +- name: github.com/pkg/profile + version: f6fe06335df110bcf1ed6d4e852b760bfc15beee +- name: github.com/pmezard/go-difflib + version: 792786c7400a136282c1664665ae0a8db921c6c2 + subpackages: + - difflib +- name: github.com/prometheus/client_golang + version: 505eaef017263e299324067d40ca2c48f6a2cf50 + subpackages: + - prometheus + - prometheus/internal + - prometheus/promhttp +- name: github.com/prometheus/client_model + version: 5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f + subpackages: + - go +- name: github.com/prometheus/common + version: 4724e9255275ce38f7179b2478abeae4e28c904f + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: 1dc9a6cbc91aacc3e8b2d63db4d2e957a5394ac4 + subpackages: + - internal/util + - nfs + - xfs +- name: github.com/PuerkitoBio/purell + version: 8a290539e2e8629dbc4e6bad948158f790ec31f4 +- name: github.com/PuerkitoBio/urlesc + version: 5bd2802263f21d8788851d5305584c82a5c75d7e +- name: github.com/sigma/go-inotify + version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 +- name: github.com/sirupsen/logrus + version: 839c75faf7f98a33d445d181f3018b5c3409a45e +- name: github.com/spf13/cobra + version: 67fc4837d267bc9bfd6e47f77783fcc3dffc68de +- name: github.com/spf13/pflag + version: 298182f68c66c05229eb03ac171abe6e309ee79a +- name: github.com/stretchr/testify + version: ffdc059bfe9ce6a4e144ba849dbedead332c6053 + subpackages: + - assert + - require +- name: go.uber.org/atomic + version: 8dc6146f7569370a472715e178d8ae31172ee6da +- name: go.uber.org/multierr + version: ddea229ff1dff9e6fe8a6c0344ac73b09e81fce5 +- name: go.uber.org/zap + version: 67bc79d13d155c02fd008f721863ff8cc5f30659 + subpackages: + - buffer + - internal/bufferpool + - internal/color + - internal/exit + - zapcore +- name: golang.org/x/crypto + version: e84da0312774c21d64ee2317962ef669b27ffb41 + subpackages: + - bcrypt + - blowfish + - cryptobyte + - cryptobyte/asn1 + - internal/subtle + - nacl/secretbox + - poly1305 + - salsa20/salsa + - ssh/terminal +- name: golang.org/x/net + version: cdfb69ac37fc6fa907650654115ebebb3aae2087 + subpackages: + - context + - context/ctxhttp + - http/httpguts + - http2 + - http2/hpack + - idna + - internal/timeseries + - trace + - websocket +- name: golang.org/x/oauth2 + version: 9f3314589c9a9136388751d9adae6b0ed400978a + subpackages: + - internal +- name: golang.org/x/sync + version: 42b317875d0fa942474b76e1b46a6060d720ae6e + subpackages: + - errgroup +- name: golang.org/x/sys + version: 3b5209105503162ded1863c307ac66fec31120dd + subpackages: + - unix + - windows +- name: golang.org/x/text + version: e6919f6577db79269a6443b9dc46d18f2238fb5d + subpackages: + - cases + - internal + - internal/language + - internal/language/compact + - internal/tag + - language + - runes + - secure/bidirule + - secure/precis + - transform + - unicode/bidi + - unicode/norm + - width +- name: golang.org/x/time + version: f51c12702a4d776e4c1fa9b0fabab841babae631 + subpackages: + - rate +- name: golang.org/x/tools + version: 6c7e314b6563ee0a4dc169c7ee9960e0b709c035 + subpackages: + - container/intsets +- 
name: google.golang.org/appengine + version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 + subpackages: + - internal + - internal/base + - internal/datastore + - internal/log + - internal/remote_api + - internal/urlfetch + - urlfetch +- name: google.golang.org/genproto + version: 54afdca5d873f7b529e2ce3def1a99df16feda90 + subpackages: + - googleapis/api/annotations + - googleapis/rpc/status +- name: google.golang.org/grpc + version: 6eaf6f47437a6b4e2153a190160ef39a92c7eceb + subpackages: + - balancer + - balancer/base + - balancer/roundrobin + - binarylog/grpc_binarylog_v1 + - codes + - connectivity + - credentials + - credentials/internal + - encoding + - encoding/proto + - grpclog + - internal + - internal/backoff + - internal/balancerload + - internal/binarylog + - internal/channelz + - internal/envconfig + - internal/grpcrand + - internal/grpcsync + - internal/syscall + - internal/transport + - keepalive + - metadata + - naming + - peer + - resolver + - resolver/dns + - resolver/passthrough + - serviceconfig + - stats + - status + - tap +- name: gopkg.in/asn1-ber.v1 + version: f715ec2f112d1e4195b827ad68cf44017a3ef2b1 +- name: gopkg.in/inf.v0 + version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/ldap.v2 + version: bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9 +- name: gopkg.in/natefinch/lumberjack.v2 + version: 7d6a1875575e09256dc552b4c0e450dcd02bd10e +- name: gopkg.in/yaml.v2 + version: 51d6538a90f86fe93ac480b35f37b2be17fef232 +- name: k8s.io/api + version: 95b840bb6a1f5f0462af804c8589396d294d4914 + subpackages: + - admission/v1 + - admission/v1beta1 + - admissionregistration/v1 + - admissionregistration/v1beta1 + - apps/v1 + - apps/v1beta1 + - apps/v1beta2 + - auditregistration/v1alpha1 + - authentication/v1 + - authentication/v1beta1 + - authorization/v1 + - authorization/v1beta1 + - autoscaling/v1 + - autoscaling/v2beta1 + - autoscaling/v2beta2 + - batch/v1 + - batch/v1beta1 + - batch/v2alpha1 + - certificates/v1beta1 + - coordination/v1 + - coordination/v1beta1 + - core/v1 + - discovery/v1alpha1 + - events/v1beta1 + - extensions/v1beta1 + - imagepolicy/v1alpha1 + - networking/v1 + - networking/v1beta1 + - node/v1alpha1 + - node/v1beta1 + - policy/v1beta1 + - rbac/v1 + - rbac/v1alpha1 + - rbac/v1beta1 + - scheduling/v1 + - scheduling/v1alpha1 + - scheduling/v1beta1 + - settings/v1alpha1 + - storage/v1 + - storage/v1alpha1 + - storage/v1beta1 +- name: k8s.io/apiextensions-apiserver + version: 8f644eb6e783291c4b8cb8cb25a9983be1a74f5c + subpackages: + - pkg/apis/apiextensions + - pkg/apis/apiextensions/v1 + - pkg/apis/apiextensions/v1beta1 + - pkg/client/clientset/clientset/scheme + - pkg/client/clientset/clientset/typed/apiextensions/v1beta1 +- name: k8s.io/apimachinery + version: 27d36303b6556f377b4f34e64705fa9024a12b0c + subpackages: + - pkg/api/apitesting + - pkg/api/equality + - pkg/api/errors + - pkg/api/meta + - pkg/api/resource + - pkg/api/validation + - pkg/api/validation/path + - pkg/apis/meta/internalversion + - pkg/apis/meta/v1 + - pkg/apis/meta/v1/unstructured + - pkg/apis/meta/v1/validation + - pkg/apis/meta/v1beta1 + - pkg/apis/meta/v1beta1/validation + - pkg/conversion + - pkg/conversion/queryparams + - pkg/fields + - pkg/labels + - pkg/runtime + - pkg/runtime/schema + - pkg/runtime/serializer + - pkg/runtime/serializer/json + - pkg/runtime/serializer/protobuf + - pkg/runtime/serializer/recognizer + - pkg/runtime/serializer/streaming + - pkg/runtime/serializer/versioning + - pkg/selection + - pkg/types + - pkg/util/cache + - pkg/util/clock + - pkg/util/diff + - 
pkg/util/errors + - pkg/util/framer + - pkg/util/intstr + - pkg/util/json + - pkg/util/mergepatch + - pkg/util/naming + - pkg/util/net + - pkg/util/rand + - pkg/util/runtime + - pkg/util/sets + - pkg/util/strategicpatch + - pkg/util/uuid + - pkg/util/validation + - pkg/util/validation/field + - pkg/util/wait + - pkg/util/waitgroup + - pkg/util/yaml + - pkg/version + - pkg/watch + - third_party/forked/golang/json + - third_party/forked/golang/reflect +- name: k8s.io/apiserver + version: bfa5e2e684ad413c22fd0dab55b2592af1ead049 + subpackages: + - pkg/admission + - pkg/admission/configuration + - pkg/admission/initializer + - pkg/admission/metrics + - pkg/admission/plugin/namespace/lifecycle + - pkg/admission/plugin/webhook + - pkg/admission/plugin/webhook/config + - pkg/admission/plugin/webhook/config/apis/webhookadmission + - pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1 + - pkg/admission/plugin/webhook/errors + - pkg/admission/plugin/webhook/generic + - pkg/admission/plugin/webhook/mutating + - pkg/admission/plugin/webhook/namespace + - pkg/admission/plugin/webhook/object + - pkg/admission/plugin/webhook/request + - pkg/admission/plugin/webhook/rules + - pkg/admission/plugin/webhook/validating + - pkg/apis/apiserver + - pkg/apis/apiserver/install + - pkg/apis/apiserver/v1alpha1 + - pkg/apis/audit + - pkg/apis/audit/install + - pkg/apis/audit/v1 + - pkg/apis/audit/v1alpha1 + - pkg/apis/audit/v1beta1 + - pkg/apis/audit/validation + - pkg/apis/config + - pkg/apis/config/v1 + - pkg/audit + - pkg/audit/event + - pkg/audit/policy + - pkg/audit/util + - pkg/authentication/authenticator + - pkg/authentication/authenticatorfactory + - pkg/authentication/group + - pkg/authentication/request/anonymous + - pkg/authentication/request/bearertoken + - pkg/authentication/request/headerrequest + - pkg/authentication/request/union + - pkg/authentication/request/websocket + - pkg/authentication/request/x509 + - pkg/authentication/serviceaccount + - pkg/authentication/token/cache + - pkg/authentication/token/tokenfile + - pkg/authentication/user + - pkg/authorization/authorizer + - pkg/authorization/authorizerfactory + - pkg/authorization/path + - pkg/authorization/union + - pkg/endpoints + - pkg/endpoints/discovery + - pkg/endpoints/filters + - pkg/endpoints/handlers + - pkg/endpoints/handlers/fieldmanager + - pkg/endpoints/handlers/fieldmanager/internal + - pkg/endpoints/handlers/negotiation + - pkg/endpoints/handlers/responsewriters + - pkg/endpoints/metrics + - pkg/endpoints/openapi + - pkg/endpoints/request + - pkg/features + - pkg/registry/generic + - pkg/registry/generic/registry + - pkg/registry/rest + - pkg/server + - pkg/server/egressselector + - pkg/server/filters + - pkg/server/healthz + - pkg/server/httplog + - pkg/server/mux + - pkg/server/options + - pkg/server/options/encryptionconfig + - pkg/server/resourceconfig + - pkg/server/routes + - pkg/server/storage + - pkg/storage + - pkg/storage/cacher + - pkg/storage/errors + - pkg/storage/etcd3 + - pkg/storage/etcd3/metrics + - pkg/storage/names + - pkg/storage/storagebackend + - pkg/storage/storagebackend/factory + - pkg/storage/value + - pkg/storage/value/encrypt/aes + - pkg/storage/value/encrypt/envelope + - pkg/storage/value/encrypt/envelope/v1beta1 + - pkg/storage/value/encrypt/identity + - pkg/storage/value/encrypt/secretbox + - pkg/util/dryrun + - pkg/util/feature + - pkg/util/flushwriter + - pkg/util/openapi + - pkg/util/webhook + - pkg/util/wsstream + - plugin/pkg/audit/buffered + - plugin/pkg/audit/dynamic + - 
plugin/pkg/audit/dynamic/enforced + - plugin/pkg/audit/log + - plugin/pkg/audit/truncate + - plugin/pkg/audit/webhook + - plugin/pkg/authenticator/token/webhook + - plugin/pkg/authorizer/webhook +- name: k8s.io/client-go + version: 1fbdaa4c8d908275eee958429b1cafc2591a2c5d + subpackages: + - discovery + - discovery/fake + - dynamic + - dynamic/dynamicinformer + - dynamic/dynamiclister + - dynamic/fake + - informers + - informers/admissionregistration + - informers/admissionregistration/v1 + - informers/admissionregistration/v1beta1 + - informers/apps + - informers/apps/v1 + - informers/apps/v1beta1 + - informers/apps/v1beta2 + - informers/auditregistration + - informers/auditregistration/v1alpha1 + - informers/autoscaling + - informers/autoscaling/v1 + - informers/autoscaling/v2beta1 + - informers/autoscaling/v2beta2 + - informers/batch + - informers/batch/v1 + - informers/batch/v1beta1 + - informers/batch/v2alpha1 + - informers/certificates + - informers/certificates/v1beta1 + - informers/coordination + - informers/coordination/v1 + - informers/coordination/v1beta1 + - informers/core + - informers/core/v1 + - informers/discovery + - informers/discovery/v1alpha1 + - informers/events + - informers/events/v1beta1 + - informers/extensions + - informers/extensions/v1beta1 + - informers/internalinterfaces + - informers/networking + - informers/networking/v1 + - informers/networking/v1beta1 + - informers/node + - informers/node/v1alpha1 + - informers/node/v1beta1 + - informers/policy + - informers/policy/v1beta1 + - informers/rbac + - informers/rbac/v1 + - informers/rbac/v1alpha1 + - informers/rbac/v1beta1 + - informers/scheduling + - informers/scheduling/v1 + - informers/scheduling/v1alpha1 + - informers/scheduling/v1beta1 + - informers/settings + - informers/settings/v1alpha1 + - informers/storage + - informers/storage/v1 + - informers/storage/v1alpha1 + - informers/storage/v1beta1 + - kubernetes + - kubernetes/fake + - kubernetes/scheme + - kubernetes/typed/admissionregistration/v1 + - kubernetes/typed/admissionregistration/v1/fake + - kubernetes/typed/admissionregistration/v1beta1 + - kubernetes/typed/admissionregistration/v1beta1/fake + - kubernetes/typed/apps/v1 + - kubernetes/typed/apps/v1/fake + - kubernetes/typed/apps/v1beta1 + - kubernetes/typed/apps/v1beta1/fake + - kubernetes/typed/apps/v1beta2 + - kubernetes/typed/apps/v1beta2/fake + - kubernetes/typed/auditregistration/v1alpha1 + - kubernetes/typed/auditregistration/v1alpha1/fake + - kubernetes/typed/authentication/v1 + - kubernetes/typed/authentication/v1/fake + - kubernetes/typed/authentication/v1beta1 + - kubernetes/typed/authentication/v1beta1/fake + - kubernetes/typed/authorization/v1 + - kubernetes/typed/authorization/v1/fake + - kubernetes/typed/authorization/v1beta1 + - kubernetes/typed/authorization/v1beta1/fake + - kubernetes/typed/autoscaling/v1 + - kubernetes/typed/autoscaling/v1/fake + - kubernetes/typed/autoscaling/v2beta1 + - kubernetes/typed/autoscaling/v2beta1/fake + - kubernetes/typed/autoscaling/v2beta2 + - kubernetes/typed/autoscaling/v2beta2/fake + - kubernetes/typed/batch/v1 + - kubernetes/typed/batch/v1/fake + - kubernetes/typed/batch/v1beta1 + - kubernetes/typed/batch/v1beta1/fake + - kubernetes/typed/batch/v2alpha1 + - kubernetes/typed/batch/v2alpha1/fake + - kubernetes/typed/certificates/v1beta1 + - kubernetes/typed/certificates/v1beta1/fake + - kubernetes/typed/coordination/v1 + - kubernetes/typed/coordination/v1/fake + - kubernetes/typed/coordination/v1beta1 + - kubernetes/typed/coordination/v1beta1/fake 
+ - kubernetes/typed/core/v1 + - kubernetes/typed/core/v1/fake + - kubernetes/typed/discovery/v1alpha1 + - kubernetes/typed/discovery/v1alpha1/fake + - kubernetes/typed/events/v1beta1 + - kubernetes/typed/events/v1beta1/fake + - kubernetes/typed/extensions/v1beta1 + - kubernetes/typed/extensions/v1beta1/fake + - kubernetes/typed/networking/v1 + - kubernetes/typed/networking/v1/fake + - kubernetes/typed/networking/v1beta1 + - kubernetes/typed/networking/v1beta1/fake + - kubernetes/typed/node/v1alpha1 + - kubernetes/typed/node/v1alpha1/fake + - kubernetes/typed/node/v1beta1 + - kubernetes/typed/node/v1beta1/fake + - kubernetes/typed/policy/v1beta1 + - kubernetes/typed/policy/v1beta1/fake + - kubernetes/typed/rbac/v1 + - kubernetes/typed/rbac/v1/fake + - kubernetes/typed/rbac/v1alpha1 + - kubernetes/typed/rbac/v1alpha1/fake + - kubernetes/typed/rbac/v1beta1 + - kubernetes/typed/rbac/v1beta1/fake + - kubernetes/typed/scheduling/v1 + - kubernetes/typed/scheduling/v1/fake + - kubernetes/typed/scheduling/v1alpha1 + - kubernetes/typed/scheduling/v1alpha1/fake + - kubernetes/typed/scheduling/v1beta1 + - kubernetes/typed/scheduling/v1beta1/fake + - kubernetes/typed/settings/v1alpha1 + - kubernetes/typed/settings/v1alpha1/fake + - kubernetes/typed/storage/v1 + - kubernetes/typed/storage/v1/fake + - kubernetes/typed/storage/v1alpha1 + - kubernetes/typed/storage/v1alpha1/fake + - kubernetes/typed/storage/v1beta1 + - kubernetes/typed/storage/v1beta1/fake + - listers/admissionregistration/v1 + - listers/admissionregistration/v1beta1 + - listers/apps/v1 + - listers/apps/v1beta1 + - listers/apps/v1beta2 + - listers/auditregistration/v1alpha1 + - listers/autoscaling/v1 + - listers/autoscaling/v2beta1 + - listers/autoscaling/v2beta2 + - listers/batch/v1 + - listers/batch/v1beta1 + - listers/batch/v2alpha1 + - listers/certificates/v1beta1 + - listers/coordination/v1 + - listers/coordination/v1beta1 + - listers/core/v1 + - listers/discovery/v1alpha1 + - listers/events/v1beta1 + - listers/extensions/v1beta1 + - listers/networking/v1 + - listers/networking/v1beta1 + - listers/node/v1alpha1 + - listers/node/v1beta1 + - listers/policy/v1beta1 + - listers/rbac/v1 + - listers/rbac/v1alpha1 + - listers/rbac/v1beta1 + - listers/scheduling/v1 + - listers/scheduling/v1alpha1 + - listers/scheduling/v1beta1 + - listers/settings/v1alpha1 + - listers/storage/v1 + - listers/storage/v1alpha1 + - listers/storage/v1beta1 + - pkg/apis/clientauthentication + - pkg/apis/clientauthentication/v1alpha1 + - pkg/apis/clientauthentication/v1beta1 + - pkg/version + - plugin/pkg/client/auth/exec + - rest + - rest/watch + - restmapper + - scale + - scale/scheme + - scale/scheme/appsint + - scale/scheme/appsv1beta1 + - scale/scheme/appsv1beta2 + - scale/scheme/autoscalingv1 + - scale/scheme/extensionsint + - scale/scheme/extensionsv1beta1 + - testing + - third_party/forked/golang/template + - tools/auth + - tools/cache + - tools/clientcmd + - tools/clientcmd/api + - tools/clientcmd/api/latest + - tools/clientcmd/api/v1 + - tools/leaderelection + - tools/leaderelection/resourcelock + - tools/metrics + - tools/pager + - tools/record + - tools/record/util + - tools/reference + - tools/watch + - transport + - util/cert + - util/connrotation + - util/flowcontrol + - util/homedir + - util/jsonpath + - util/keyutil + - util/retry + - util/workqueue +- name: k8s.io/component-base + version: 547f6c5d70902c6683e93ad96f84adc6b943aedf + subpackages: + - cli/flag + - featuregate + - logs + - metrics + - metrics/legacyregistry + - version +- name: 
k8s.io/klog + version: 3ca30a56d8a775276f9cdae009ba326fdc05af7f +- name: k8s.io/kube-aggregator + version: 8c8f079fddc3d37cba5ac273f52d52d692994394 + subpackages: + - pkg/apis/apiregistration + - pkg/apis/apiregistration/v1 + - pkg/apis/apiregistration/v1beta1 + - pkg/client/clientset_generated/clientset/scheme + - pkg/client/clientset_generated/clientset/typed/apiregistration/v1 +- name: k8s.io/kube-openapi + version: 743ec37842bffe49dd4221d9026f30fb1d5adbc4 + subpackages: + - pkg/builder + - pkg/common + - pkg/handler + - pkg/schemaconv + - pkg/util + - pkg/util/proto +- name: k8s.io/utils + version: 581e00157fb1a0435d4fac54a52d1ca1e481d60e + subpackages: + - buffer + - diff + - field + - integer + - path + - pointer + - trace +- name: sigs.k8s.io/structured-merge-diff + version: 0c1d754dd6485aa156ff3c8c599079adac6ec82f + subpackages: + - fieldpath + - merge + - schema + - typed + - value +- name: sigs.k8s.io/yaml + version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 +testImports: +- name: vbom.ml/util + version: db5cfe13f5cc80a4990d98e2e1b0707a4d1a5394 + subpackages: + - sortorder diff --git a/vendor/github.com/openshift/library-go/glide.yaml b/vendor/github.com/openshift/library-go/glide.yaml new file mode 100644 index 00000000000..ed1f7445442 --- /dev/null +++ b/vendor/github.com/openshift/library-go/glide.yaml @@ -0,0 +1,70 @@ +package: github.com/openshift/library-go +import: + - package: k8s.io/apimachinery + version: kubernetes-1.16.0 + - package: k8s.io/api + version: kubernetes-1.16.0 + - package: k8s.io/apiserver + version: kubernetes-1.16.0 + - package: k8s.io/apiextensions-apiserver + version: kubernetes-1.16.0 + - package: k8s.io/kube-aggregator + version: kubernetes-1.16.0 + - package: k8s.io/client-go + version: kubernetes-1.16.0 + - package: github.com/openshift/api + version: master + - package: github.com/openshift/client-go + version: master + + # sig-master - needed until https://github.com/kubernetes-sigs/kube-storage-version-migrator/pull/41 merged + - package: github.com/kubernetes-sigs/kube-storage-version-migrator + repo: https://github.com/openshift/kubernetes-kube-storage-version-migrator.git + version: master + + # sig-master - needed for file observer + - package: github.com/sigma/go-inotify + version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 + # sig-master + - package: github.com/getsentry/raven-go + version: c977f96e109525a5d8fa10a19165341f601f38b0 + # sig-master - transitive through raven-go, this matches the kube level + - package: github.com/pkg/errors + version: v0.8.0 + # sig-master - transitive through raven-go, this is the level we had when we noticed + - package: github.com/certifi/gocertifi + version: ee1a9a0726d2ae45f54118cac878c990d4016ded + - package: github.com/jteeuwen/go-bindata + version: a0ff2567cfb70903282db057e799fd826784d41d + + # matches kube + - package: github.com/spf13/cobra + version: v0.0.4 + - package: github.com/spf13/pflag + version: 1.0.3 + - package: github.com/sirupsen/logrus + version: v1.4.2 + - package: github.com/blang/semver + version: v3.5.0 + - package: github.com/imdario/mergo + version: v0.3.5 + - package: sigs.k8s.io/structured-merge-diff + version: 0c1d754dd648 # matches 1.16.0-beta.2 go.mod + + + # matches openshift/origin + - package: github.com/gonum/graph + version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 + + # devexp: + - package: github.com/fsouza/go-dockerclient + version: da3951ba2e9e02bc0e7642150b3e265aed7e1df3 # matching origin 4.2 level + - package: github.com/docker/distribution + # See 
https://github.com/openshift/image-registry/pull/126/commits/eb32acef7827ac2227c3aeaddc444880ed98edb3 leading to https://github.com/openshift/docker-distribution/commits/image-registry-3.11 + version: 16128bbac47f75050e82f7e91b04df33775e0c23 # level currently used in origin to base the origin patches on. + - package: github.com/docker/libnetwork + version: 7f13a5c99f4bb76a4122035d495984b6a09739bb # required for docker/docker/opts + + # VCS issues + - package: bitbucket.org/ww/goautoneg + vcs: hg diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go new file mode 100644 index 00000000000..ecaf34d8a1f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go @@ -0,0 +1,25 @@ +package admissionregistrationtesting + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" +) + +func AdmissionRegistrationTest(registeredAdmission *admission.Plugins, orderedAdmissionPlugins []string, defaultOffPlugins sets.String) error { + errs := []error{} + registeredPlugins := sets.NewString(registeredAdmission.Registered()...) + orderedAdmissionPluginsSet := sets.NewString(orderedAdmissionPlugins...) + + // make sure that all orderedAdmissionPlugins are registered + if diff := orderedAdmissionPluginsSet.Difference(registeredPlugins); len(diff) > 0 { + errs = append(errs, fmt.Errorf("registered plugins missing admission plugins: %v", diff.List())) + } + if diff := defaultOffPlugins.Difference(orderedAdmissionPluginsSet); len(diff) > 0 { + errs = append(errs, fmt.Errorf("ordered admission plugins missing defaultOff plugins: %v", diff.List())) + } + + return errors.NewAggregate(errs) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go new file mode 100644 index 00000000000..5b4dc1036b0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go @@ -0,0 +1,30 @@ +package admissionrestconfig + +import ( + "k8s.io/apiserver/pkg/admission" + restclient "k8s.io/client-go/rest" +) + +func NewInitializer(restClientConfig restclient.Config) admission.PluginInitializer { + return &localInitializer{ + restClientConfig: restClientConfig, + } +} + +// WantsRESTClientConfig gives access to a RESTClientConfig. It's useful for doing unusual things with transports. 
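
AdmissionRegistrationTest above reduces registration validation to two set differences: every plugin in the ordered list must be registered, and every default-off plugin must appear somewhere in the ordered list. A minimal sketch of how a server's unit test might call it; the plugin name is invented and the factory is elided as nil:

    package admissionregistrationtesting_test

    import (
        "testing"

        "k8s.io/apimachinery/pkg/util/sets"
        "k8s.io/apiserver/pkg/admission"

        "github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting"
    )

    func TestAdmissionRegistration(t *testing.T) {
        plugins := admission.NewPlugins()
        plugins.Register("example.openshift.io/First", nil) // a real server registers a factory here
        ordered := []string{"example.openshift.io/First"}

        if err := admissionregistrationtesting.AdmissionRegistrationTest(plugins, ordered, sets.NewString()); err != nil {
            t.Fatal(err)
        }
    }
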
+type WantsRESTClientConfig interface { + SetRESTClientConfig(restclient.Config) + admission.InitializationValidator +} + +type localInitializer struct { + restClientConfig restclient.Config +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsRESTClientConfig); ok { + wants.SetRESTClientConfig(i.restClientConfig) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go new file mode 100644 index 00000000000..3b2d2455404 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go @@ -0,0 +1,22 @@ +package admissiontimeout + +import ( + "time" + + "k8s.io/apiserver/pkg/admission" +) + +// AdmissionTimeout provides a decorator that will fail an admission plugin after a certain amount of time +// +// DEPRECATED: use the context of the admission handler instead. +type AdmissionTimeout struct { + Timeout time.Duration +} + +func (d AdmissionTimeout) WithTimeout(admissionPlugin admission.Interface, name string) admission.Interface { + return pluginHandlerWithTimeout{ + name: name, + admissionPlugin: admissionPlugin, + timeout: d.Timeout, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go new file mode 100644 index 00000000000..8667304edfd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go @@ -0,0 +1,68 @@ +package admissiontimeout + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" +) + +type pluginHandlerWithTimeout struct { + name string + admissionPlugin admission.Interface + timeout time.Duration +} + +var _ admission.ValidationInterface = &pluginHandlerWithTimeout{} +var _ admission.MutationInterface = &pluginHandlerWithTimeout{} + +func (p pluginHandlerWithTimeout) Handles(operation admission.Operation) bool { + return p.admissionPlugin.Handles(operation) +} + +func (p pluginHandlerWithTimeout) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface) + if !ok { + return nil + } + + admissionDone := make(chan struct{}) + admissionErr := fmt.Errorf("default to mutation error") + go func() { + defer utilruntime.HandleCrash() + defer close(admissionDone) + admissionErr = mutatingHandler.Admit(ctx, a, o) + }() + + select { + case <-admissionDone: + return admissionErr + case <-time.After(p.timeout): + return errors.NewInternalError(fmt.Errorf("admission plugin %q failed to complete mutation in %v", p.name, p.timeout)) + } +} + +func (p pluginHandlerWithTimeout) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface) + if !ok { + return nil + } + + admissionDone := make(chan struct{}) + admissionErr := fmt.Errorf("default to validation error") + go func() { + defer utilruntime.HandleCrash() + defer close(admissionDone) + admissionErr = 
validatingHandler.Validate(ctx, a, o) + }() + + select { + case <-admissionDone: + return admissionErr + case <-time.After(p.timeout): + return errors.NewInternalError(fmt.Errorf("admission plugin %q failed to complete validation in %v", p.name, p.timeout)) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go new file mode 100644 index 00000000000..892f8a2b0fc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go @@ -0,0 +1,107 @@ +package admissiontimeout + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" +) + +type admitFunc func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error + +type dummyAdmit struct { + admitFn admitFunc +} + +func (p dummyAdmit) Handles(operation admission.Operation) bool { + return true +} + +func (p dummyAdmit) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + return p.admitFn(ctx, a, o) +} + +func (p dummyAdmit) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + return p.admitFn(ctx, a, o) +} + +func TestTimeoutAdmission(t *testing.T) { + utilruntime.ReallyCrash = false + + tests := []struct { + name string + + timeout time.Duration + admissionPlugin func() (admit admitFunc, stopCh chan struct{}) + expectedError string + }{ + { + name: "stops on time", + timeout: 50 * time.Millisecond, + admissionPlugin: func() (admitFunc, chan struct{}) { + stopCh := make(chan struct{}) + return func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + <-stopCh + return nil + }, stopCh + }, + expectedError: `fake-name" failed to complete`, + }, + { + name: "stops on success", + timeout: 500 * time.Millisecond, + admissionPlugin: func() (admitFunc, chan struct{}) { + stopCh := make(chan struct{}) + return func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + return fmt.Errorf("fake failure to finish") + }, stopCh + }, + expectedError: "fake failure to finish", + }, + { + name: "no crash on panic", + timeout: 500 * time.Millisecond, + admissionPlugin: func() (admitFunc, chan struct{}) { + stopCh := make(chan struct{}) + return func(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { + panic("fail!") + }, stopCh + }, + expectedError: "default to ", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + admitFn, stopCh := test.admissionPlugin() + defer close(stopCh) + + fakePlugin := dummyAdmit{admitFn: admitFn} + decorator := AdmissionTimeout{Timeout: test.timeout} + decoratedPlugin := decorator.WithTimeout(fakePlugin, "fake-name") + + actualErr := decoratedPlugin.(admission.MutationInterface).Admit(context.TODO(), nil, nil) + validateErr(t, actualErr, test.expectedError) + + actualErr = decoratedPlugin.(admission.ValidationInterface).Validate(context.TODO(), nil, nil) + validateErr(t, actualErr, test.expectedError) + }) + } +} + +func validateErr(t *testing.T, actualErr error, expectedError string) { + t.Helper() + switch { + case actualErr == nil && len(expectedError) != 0: + t.Fatal(expectedError) + case actualErr == nil && len(expectedError) == 0: + case actualErr != nil && 
len(expectedError) == 0:
+		t.Fatal(actualErr)
+	case actualErr != nil && !strings.Contains(actualErr.Error(), expectedError):
+		t.Fatal(actualErr)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go
new file mode 100644
index 00000000000..611735cccbb
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go
@@ -0,0 +1,35 @@
+package apiserverconfig
+
+import (
+	"net/http"
+	"strings"
+)
+
+// cacheExcludedPathPrefixes is small and simple until the handlers include the cache headers they need
+var cacheExcludedPathPrefixes = []string{
+	"/swagger-2.0.0.json",
+	"/swagger-2.0.0.pb-v1",
+	"/swagger-2.0.0.pb-v1.gz",
+	"/swagger.json",
+	"/swaggerapi",
+	"/openapi/",
+}
+
+// WithCacheControl sets the Cache-Control header to the specified value.
+func WithCacheControl(handler http.Handler, value string) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		if _, ok := w.Header()["Cache-Control"]; ok {
+			handler.ServeHTTP(w, req)
+			return
+		}
+		for _, prefix := range cacheExcludedPathPrefixes {
+			if strings.HasPrefix(req.URL.Path, prefix) {
+				handler.ServeHTTP(w, req)
+				return
+			}
+		}
+
+		w.Header().Set("Cache-Control", value)
+		handler.ServeHTTP(w, req)
+	})
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go
new file mode 100644
index 00000000000..5dde34ca7d4
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go
@@ -0,0 +1,26 @@
+package apiserverconfig
+
+import (
+	"net/http"
+	"regexp"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+	apirequest "k8s.io/apiserver/pkg/endpoints/request"
+	genericfilters "k8s.io/apiserver/pkg/server/filters"
+)
+
+// request paths that match this regular expression will be treated as long running
+// and not subjected to the default server timeout.
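
The pluginHandlerWithTimeout above runs the wrapped plugin on its own goroutine and, when the deadline fires first, abandons that goroutine and returns an internal error in its place. A sketch of decorating a plugin, assuming an invented no-op plugin and an illustrative 13-second deadline:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/apiserver/pkg/admission"

        "github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout"
    )

    // noopPlugin is a stand-in admission plugin used only for illustration.
    type noopPlugin struct{}

    func (noopPlugin) Handles(op admission.Operation) bool { return true }

    func main() {
        d := admissiontimeout.AdmissionTimeout{Timeout: 13 * time.Second}
        wrapped := d.WithTimeout(noopPlugin{}, "example.openshift.io/Noop")

        // Handles is delegated straight through to the wrapped plugin.
        fmt.Println(wrapped.Handles(admission.Create)) // true
    }

Since WithTimeout has the same shape as upstream's admission.DecoratorFunc, it should also slot into a chain built with admission.Plugins.NewFromPlugins, at the cost of leaking one goroutine per timed-out call.
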
+const originLongRunningEndpointsRE = "(/|^)(buildconfigs/.*/instantiatebinary|imagestreamimports)$" + +var ( + originLongRunningRequestRE = regexp.MustCompile(originLongRunningEndpointsRE) + kubeLongRunningFunc = genericfilters.BasicLongRunningRequestCheck( + sets.NewString("watch", "proxy"), + sets.NewString("attach", "exec", "proxy", "log", "portforward"), + ) +) + +func IsLongRunningRequest(r *http.Request, requestInfo *apirequest.RequestInfo) bool { + return originLongRunningRequestRE.MatchString(r.URL.Path) || kubeLongRunningFunc(r, requestInfo) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go new file mode 100644 index 00000000000..d97946b9b4f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go @@ -0,0 +1,129 @@ +package apiserverconfig + +import ( + "bytes" + "io/ioutil" + "net/http" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/request" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +type personalSARRequestInfoResolver struct { + // infoFactory is used to determine info for the request + infoFactory apirequest.RequestInfoResolver +} + +func newPersonalSARRequestInfoResolver(infoFactory apirequest.RequestInfoResolver) apirequest.RequestInfoResolver { + return &personalSARRequestInfoResolver{ + infoFactory: infoFactory, + } +} + +func (a *personalSARRequestInfoResolver) NewRequestInfo(req *http.Request) (*request.RequestInfo, error) { + requestInfo, err := a.infoFactory.NewRequestInfo(req) + if err != nil { + return requestInfo, err + } + + // only match SAR and LSAR requests for personal review + switch { + case !requestInfo.IsResourceRequest: + return requestInfo, nil + + case len(requestInfo.APIGroup) != 0 && requestInfo.APIGroup != "authorization.openshift.io": + return requestInfo, nil + + case len(requestInfo.Subresource) != 0: + return requestInfo, nil + + case requestInfo.Verb != "create": + return requestInfo, nil + + case requestInfo.Resource != "subjectaccessreviews" && requestInfo.Resource != "localsubjectaccessreviews": + return requestInfo, nil + } + + // at this point we're probably running a SAR or LSAR. Decode the body and check. This is expensive. 
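
Stepping back to longrunning.go above: IsLongRunningRequest composes the OpenShift-specific path regexp with the generic Kubernetes verb and subresource check, so binary build uploads and image stream imports escape the default server timeout. A small illustration with an invented request:

    package main

    import (
        "fmt"
        "net/http"

        apirequest "k8s.io/apiserver/pkg/endpoints/request"

        "github.com/openshift/library-go/pkg/apiserver/apiserverconfig"
    )

    func main() {
        req, _ := http.NewRequest("POST",
            "/apis/build.openshift.io/v1/namespaces/demo/buildconfigs/app/instantiatebinary", nil)
        info := &apirequest.RequestInfo{IsResourceRequest: true, Verb: "create"}

        // true: the path matches originLongRunningEndpointsRE, so the verb sets are never consulted.
        fmt.Println(apiserverconfig.IsLongRunningRequest(req, info))
    }
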
+ isSelfSAR, err := isPersonalAccessReviewFromRequest(req, requestInfo) + if err != nil { + return nil, err + } + if !isSelfSAR { + return requestInfo, nil + } + + // if we do have a self-SAR, rewrite the requestInfo to indicate this is a selfsubjectaccessreviews.authorization.k8s.io request + requestInfo.APIGroup = "authorization.k8s.io" + requestInfo.Resource = "selfsubjectaccessreviews" + + return requestInfo, nil +} + +// isPersonalAccessReviewFromRequest this variant handles the case where we have an httpRequest +func isPersonalAccessReviewFromRequest(req *http.Request, requestInfo *request.RequestInfo) (bool, error) { + // TODO once we're integrated with the api installer, we should have direct access to the deserialized content + // for now, this only happens on subjectaccessreviews with a personal check, pay the double retrieve and decode cost + body, err := ioutil.ReadAll(req.Body) + if err != nil { + return false, err + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + + defaultGVK := schema.GroupVersionKind{Version: requestInfo.APIVersion, Group: requestInfo.APIGroup} + switch requestInfo.Resource { + case "subjectaccessreviews": + defaultGVK.Kind = "SubjectAccessReview" + case "localsubjectaccessreviews": + defaultGVK.Kind = "LocalSubjectAccessReview" + } + + obj, _, err := sarCodecFactory.UniversalDeserializer().Decode(body, &defaultGVK, nil) + if err != nil { + return false, err + } + switch castObj := obj.(type) { + case *authorizationv1.SubjectAccessReview: + return IsPersonalAccessReviewFromSAR(castObj), nil + + case *authorizationv1.LocalSubjectAccessReview: + return isPersonalAccessReviewFromLocalSAR(castObj), nil + + default: + return false, nil + } +} + +// IsPersonalAccessReviewFromSAR this variant handles the case where we have an SAR +func IsPersonalAccessReviewFromSAR(sar *authorizationv1.SubjectAccessReview) bool { + if len(sar.User) == 0 && len(sar.GroupsSlice) == 0 { + return true + } + + return false +} + +// isPersonalAccessReviewFromLocalSAR this variant handles the case where we have a local SAR +func isPersonalAccessReviewFromLocalSAR(sar *authorizationv1.LocalSubjectAccessReview) bool { + if len(sar.User) == 0 && len(sar.GroupsSlice) == 0 { + return true + } + + return false +} + +var ( + sarScheme = runtime.NewScheme() + sarCodecFactory = serializer.NewCodecFactory(sarScheme) +) + +func init() { + utilruntime.Must(authorizationv1.Install(sarScheme)) + utilruntime.Must(authorizationv1.DeprecatedInstallWithoutGroup(sarScheme)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go new file mode 100644 index 00000000000..7682302f89a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go @@ -0,0 +1,34 @@ +package apiserverconfig + +import ( + "net/http" + + apirequest "k8s.io/apiserver/pkg/endpoints/request" + + projectv1 "github.com/openshift/api/project/v1" +) + +type projectRequestInfoResolver struct { + // infoFactory is used to determine info for the request + infoFactory apirequest.RequestInfoResolver +} + +func newProjectRequestInfoResolver(infoFactory apirequest.RequestInfoResolver) apirequest.RequestInfoResolver { + return &projectRequestInfoResolver{ + infoFactory: infoFactory, + } +} + +func (a *projectRequestInfoResolver) NewRequestInfo(req *http.Request) (*apirequest.RequestInfo, error) { + requestInfo, err := 
a.infoFactory.NewRequestInfo(req) + if err != nil { + return requestInfo, err + } + + // if the resource is projects, we need to set the namespace to the value of the name. + if (len(requestInfo.APIGroup) == 0 || requestInfo.APIGroup == projectv1.GroupName) && requestInfo.Resource == "projects" && len(requestInfo.Name) > 0 { + requestInfo.Namespace = requestInfo.Name + } + + return requestInfo, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go new file mode 100644 index 00000000000..d14647d55a8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go @@ -0,0 +1,17 @@ +package apiserverconfig + +import ( + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +func OpenshiftRequestInfoResolver() apirequest.RequestInfoResolver { + // Default API request info factory + requestInfoFactory := &apirequest.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + personalSARRequestInfoResolver := newPersonalSARRequestInfoResolver(requestInfoFactory) + projectRequestInfoResolver := newProjectRequestInfoResolver(personalSARRequestInfoResolver) + return projectRequestInfoResolver +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go new file mode 100644 index 00000000000..91539fb6a6b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go @@ -0,0 +1,129 @@ +package httprequest + +import ( + "net" + "net/http" + "strings" + + "bitbucket.org/ww/goautoneg" +) + +// PrefersHTML returns true if the request was made by something that looks like a browser, or can receive HTML +func PrefersHTML(req *http.Request) bool { + accepts := goautoneg.ParseAccept(req.Header.Get("Accept")) + acceptsHTML := false + acceptsJSON := false + for _, accept := range accepts { + if accept.Type == "text" && accept.SubType == "html" { + acceptsHTML = true + } else if accept.Type == "application" && accept.SubType == "json" { + acceptsJSON = true + } + } + + // If HTML is accepted, return true + if acceptsHTML { + return true + } + + // If JSON was specifically requested, return false + // This gives browsers a way to make requests and add an "Accept" header to request JSON + if acceptsJSON { + return false + } + + // In Intranet/Compatibility mode, IE sends an Accept header that does not contain "text/html". + if strings.HasPrefix(req.UserAgent(), "Mozilla") { + return true + } + + return false +} + +// SchemeHost returns the scheme and host used to make this request. +// Suitable for use to compute scheme/host in returned 302 redirect Location. +// Note the returned host is not normalized, and may or may not contain a port. 
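
The chain returned by OpenshiftRequestInfoResolver is easiest to see end to end: the default factory parses the path, the personal-SAR wrapper rewrites only self-SAR creates, and the project wrapper copies a project name into the namespace field. A sketch with an invented project path:

    package main

    import (
        "fmt"
        "net/http"

        "github.com/openshift/library-go/pkg/apiserver/apiserverconfig"
    )

    func main() {
        resolver := apiserverconfig.OpenshiftRequestInfoResolver()
        req, _ := http.NewRequest("GET", "/apis/project.openshift.io/v1/projects/myproject", nil)

        info, err := resolver.NewRequestInfo(req)
        if err != nil {
            panic(err)
        }
        fmt.Println(info.Namespace) // myproject: the project name doubles as the namespace
    }
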
+// Returned values are based on the following information: +// +// Host: +// * X-Forwarded-Host/X-Forwarded-Port headers +// * Host field on the request (parsed from Host header) +// * Host in the request's URL (parsed from Request-Line) +// +// Scheme: +// * X-Forwarded-Proto header +// * Existence of TLS information on the request implies https +// * Scheme in the request's URL (parsed from Request-Line) +// * Port (if included in calculated Host value, 443 implies https) +// * Otherwise, defaults to "http" +func SchemeHost(req *http.Request) (string /*scheme*/, string /*host*/) { + forwarded := func(attr string) string { + // Get the X-Forwarded- value + value := req.Header.Get("X-Forwarded-" + attr) + // Take the first comma-separated value, if multiple exist + value = strings.SplitN(value, ",", 2)[0] + // Trim whitespace + return strings.TrimSpace(value) + } + + hasExplicitHost := func(h string) bool { + _, _, err := net.SplitHostPort(h) + return err == nil + } + + forwardedHost := forwarded("Host") + host := "" + hostHadExplicitPort := false + switch { + case len(forwardedHost) > 0: + host = forwardedHost + hostHadExplicitPort = hasExplicitHost(host) + + // If both X-Forwarded-Host and X-Forwarded-Port are sent, use the explicit port info + if forwardedPort := forwarded("Port"); len(forwardedPort) > 0 { + if h, _, err := net.SplitHostPort(forwardedHost); err == nil { + host = net.JoinHostPort(h, forwardedPort) + } else { + host = net.JoinHostPort(forwardedHost, forwardedPort) + } + } + + case len(req.Host) > 0: + host = req.Host + hostHadExplicitPort = hasExplicitHost(host) + + case len(req.URL.Host) > 0: + host = req.URL.Host + hostHadExplicitPort = hasExplicitHost(host) + } + + port := "" + if _, p, err := net.SplitHostPort(host); err == nil { + port = p + } + + forwardedProto := forwarded("Proto") + scheme := "" + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case req.TLS != nil: + scheme = "https" + case len(req.URL.Scheme) > 0: + scheme = req.URL.Scheme + case port == "443": + scheme = "https" + default: + scheme = "http" + } + + if !hostHadExplicitPort { + if (scheme == "https" && port == "443") || (scheme == "http" && port == "80") { + if hostWithoutPort, _, err := net.SplitHostPort(host); err == nil { + host = hostWithoutPort + } + } + } + + return scheme, host +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go new file mode 100644 index 00000000000..add344aedf3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go @@ -0,0 +1,236 @@ +package httprequest + +import ( + "crypto/tls" + "net/http" + "net/url" + "testing" +) + +func TestSchemeHost(t *testing.T) { + + testcases := map[string]struct { + req *http.Request + expectedScheme string + expectedHost string + }{ + "X-Forwarded-Host and X-Forwarded-Port combined": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + "X-Forwarded-Port overwrites X-Forwarded-Host port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com:1234"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": 
[]string{"https"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com:443", + }, + "X-Forwarded-* multiple attrs": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "reqhost", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com,foo.com"}, + "X-Forwarded-Port": []string{"443,123"}, + "X-Forwarded-Proto": []string{"https,http"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + "stripped X-Forwarded-Host and X-Forwarded-Port with non-standard port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"80"}, + "X-Forwarded-Proto": []string{"https"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com:80", + }, + "detect scheme from X-Forwarded-Port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"443"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + + "req host": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "example.com", + }, + expectedScheme: "http", + expectedHost: "example.com", + }, + "req host with port": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "example.com:80", + }, + expectedScheme: "http", + expectedHost: "example.com:80", + }, + "req host with tls port": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "example.com:443", + }, + expectedScheme: "https", + expectedHost: "example.com:443", + }, + + "req tls": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "example.com", + TLS: &tls.ConnectionState{}, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + + "req url": { + req: &http.Request{ + URL: &url.URL{Scheme: "https", Host: "example.com", Path: "/"}, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + "req url with port": { + req: &http.Request{ + URL: &url.URL{Scheme: "https", Host: "example.com:123", Path: "/"}, + }, + expectedScheme: "https", + expectedHost: "example.com:123", + }, + + // The following scenarios are captured from actual direct requests to pods + "non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "172.17.0.2:9080", + }, + expectedScheme: "http", + expectedHost: "172.17.0.2:9080", + }, + "tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "172.17.0.2:9443", + TLS: &tls.ConnectionState{ /* request has non-nil TLS connection state */ }, + }, + expectedScheme: "https", + expectedHost: "172.17.0.2:9443", + }, + + // The following scenarios are captured from actual requests to pods via services + "svc -> non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "service.default.svc.cluster.local:10080", + }, + expectedScheme: "http", + expectedHost: "service.default.svc.cluster.local:10080", + }, + "svc -> tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "service.default.svc.cluster.local:10443", + TLS: &tls.ConnectionState{ /* request has non-nil TLS connection state */ }, + }, + expectedScheme: "https", + expectedHost: "service.default.svc.cluster.local:10443", + }, + + // The following scenarios are captured from actual requests to pods via services via routes serviced by haproxy + "haproxy non-tls route -> svc -> non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, 
+ Host: "route-namespace.router.default.svc.cluster.local", + Header: http.Header{ + "X-Forwarded-Host": []string{"route-namespace.router.default.svc.cluster.local"}, + "X-Forwarded-Port": []string{"80"}, + "X-Forwarded-Proto": []string{"http"}, + "Forwarded": []string{"for=172.18.2.57;host=route-namespace.router.default.svc.cluster.local;proto=http"}, + "X-Forwarded-For": []string{"172.18.2.57"}, + }, + }, + expectedScheme: "http", + expectedHost: "route-namespace.router.default.svc.cluster.local", + }, + "haproxy edge terminated route -> svc -> non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local", + Header: http.Header{ + "X-Forwarded-Host": []string{"route-namespace.router.default.svc.cluster.local"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + "Forwarded": []string{"for=172.18.2.57;host=route-namespace.router.default.svc.cluster.local;proto=https"}, + "X-Forwarded-For": []string{"172.18.2.57"}, + }, + }, + expectedScheme: "https", + expectedHost: "route-namespace.router.default.svc.cluster.local", + }, + "haproxy edge terminated route -> svc -> non-tls pod with the explicit port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local:443", + Header: http.Header{ + "X-Forwarded-Host": []string{"route-namespace.router.default.svc.cluster.local:443"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + "Forwarded": []string{"for=172.18.2.57;host=route-namespace.router.default.svc.cluster.local:443;proto=https"}, + "X-Forwarded-For": []string{"172.18.2.57"}, + }, + }, + expectedScheme: "https", + expectedHost: "route-namespace.router.default.svc.cluster.local:443", + }, + "haproxy passthrough route -> svc -> tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local", + TLS: &tls.ConnectionState{ /* request has non-nil TLS connection state */ }, + }, + expectedScheme: "https", + expectedHost: "route-namespace.router.default.svc.cluster.local", + }, + } + + for k, tc := range testcases { + scheme, host := SchemeHost(tc.req) + if scheme != tc.expectedScheme { + t.Errorf("%s: expected scheme %q, got %q", k, tc.expectedScheme, scheme) + } + if host != tc.expectedHost { + t.Errorf("%s: expected host %q, got %q", k, tc.expectedHost, host) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/OWNERS b/vendor/github.com/openshift/library-go/pkg/apps/OWNERS new file mode 100644 index 00000000000..f0b0af2b58b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/OWNERS @@ -0,0 +1,10 @@ +reviewers: + - smarterclayton + - mfojtik + - soltysh + - tnozicka +approvers: + - smarterclayton + - mfojtik + - soltysh + - tnozicka diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go new file mode 100644 index 00000000000..91789b1c4bb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go @@ -0,0 +1,30 @@ +package appsserialization + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + appsv1 "github.com/openshift/api/apps/v1" +) + +var ( + // for decoding, we want to be tolerant of groupified and non-groupified + annotationDecodingScheme = runtime.NewScheme() + annotationDecoder 
runtime.Decoder
+
+	// for encoding, we want to be strict on groupified
+	annotationEncodingScheme = runtime.NewScheme()
+	annotationEncoder        runtime.Encoder
+)
+
+func init() {
+	utilruntime.Must(appsv1.Install(annotationDecodingScheme))
+	utilruntime.Must(appsv1.DeprecatedInstallWithoutGroup(annotationDecodingScheme))
+	annotationDecoderCodecFactory := serializer.NewCodecFactory(annotationDecodingScheme)
+	annotationDecoder = annotationDecoderCodecFactory.UniversalDecoder(appsv1.GroupVersion)
+
+	utilruntime.Must(appsv1.Install(annotationEncodingScheme))
+	annotationEncoderCodecFactory := serializer.NewCodecFactory(annotationEncodingScheme)
+	annotationEncoder = annotationEncoderCodecFactory.LegacyCodec(appsv1.GroupVersion)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go
new file mode 100644
index 00000000000..8354d2684dd
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go
@@ -0,0 +1,47 @@
+package appsserialization
+
+import (
+	"strings"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/davecgh/go-spew/spew"
+
+	appsv1 "github.com/openshift/api/apps/v1"
+)
+
+const legacyDC = `{
+	"apiVersion": "v1",
+	"kind": "DeploymentConfig",
+	"metadata": {
+		"name": "sinatra-app-example-a"
+	}
+}
+`
+
+func TestLegacyDecoding(t *testing.T) {
+	result, err := runtime.Decode(annotationDecoder, []byte(legacyDC))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if result.(*appsv1.DeploymentConfig).Name != "sinatra-app-example-a" {
+		t.Fatal(spew.Sdump(result))
+	}
+
+	groupifiedBytes, err := runtime.Encode(annotationEncoder, result)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !strings.Contains(string(groupifiedBytes), "apps.openshift.io/v1") {
+		t.Fatal(string(groupifiedBytes))
+	}
+
+	result2, err := runtime.Decode(annotationDecoder, groupifiedBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if result2.(*appsv1.DeploymentConfig).Name != "sinatra-app-example-a" {
+		t.Fatal(spew.Sdump(result2))
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go
new file mode 100644
index 00000000000..0433b77e440
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go
@@ -0,0 +1,31 @@
+package appsserialization
+
+import (
+	"fmt"
+
+	appsv1 "github.com/openshift/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// DecodeDeploymentConfig decodes a DeploymentConfig from the given controller using the annotation codec.
+// An error is returned if the controller doesn't contain an encoded config or decoding fails.
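
The tolerant decoder and strict encoder built above back the exported helpers declared next. A hedged round-trip sketch; the controller name and the legacy JSON payload are invented:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        appsv1 "github.com/openshift/api/apps/v1"
        "github.com/openshift/library-go/pkg/apps/appsserialization"
    )

    func main() {
        // An old, ungroupified payload such as a legacy controller might have stored.
        legacy := `{"apiVersion":"v1","kind":"DeploymentConfig","metadata":{"name":"frontend"}}`
        rc := &corev1.ReplicationController{
            ObjectMeta: metav1.ObjectMeta{
                Name:        "frontend-1",
                Annotations: map[string]string{appsv1.DeploymentEncodedConfigAnnotation: legacy},
            },
        }

        dc, err := appsserialization.DecodeDeploymentConfig(rc)
        if err != nil {
            panic(err)
        }
        out, err := appsserialization.EncodeDeploymentConfig(dc)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // re-encoded strictly as apps.openshift.io/v1
    }
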
+func DecodeDeploymentConfig(controller metav1.ObjectMetaAccessor) (*appsv1.DeploymentConfig, error) { + encodedConfig, exists := controller.GetObjectMeta().GetAnnotations()[appsv1.DeploymentEncodedConfigAnnotation] + if !exists { + return nil, fmt.Errorf("object %s does not have encoded deployment config annotation", controller.GetObjectMeta().GetName()) + } + config, err := runtime.Decode(annotationDecoder, []byte(encodedConfig)) + if err != nil { + return nil, err + } + externalConfig, ok := config.(*appsv1.DeploymentConfig) + if !ok { + return nil, fmt.Errorf("object %+v is not v1.DeploymentConfig", config) + } + return externalConfig, nil +} + +func EncodeDeploymentConfig(config *appsv1.DeploymentConfig) ([]byte, error) { + return runtime.Encode(annotationEncoder, config) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go new file mode 100644 index 00000000000..ccb9150bcaf --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go @@ -0,0 +1,60 @@ +package appsutil + +const ( + // FailedRcCreateReason is added in a deployment config when it cannot create a new replication + // controller. + FailedRcCreateReason = "ReplicationControllerCreateError" + // NewReplicationControllerReason is added in a deployment config when it creates a new replication + // controller. + NewReplicationControllerReason = "NewReplicationControllerCreated" + // NewRcAvailableReason is added in a deployment config when its newest replication controller is made + // available ie. the number of new pods that have passed readiness checks and run for at least + // minReadySeconds is at least the minimum available pods that need to run for the deployment config. + NewRcAvailableReason = "NewReplicationControllerAvailable" + // TimedOutReason is added in a deployment config when its newest replication controller fails to show + // any progress within the given deadline (progressDeadlineSeconds). + TimedOutReason = "ProgressDeadlineExceeded" + // PausedConfigReason is added in a deployment config when it is paused. Lack of progress shouldn't be + // estimated once a deployment config is paused. + PausedConfigReason = "DeploymentConfigPaused" + // CancelledRolloutReason is added in a deployment config when its newest rollout was + // interrupted by cancellation. + CancelledRolloutReason = "RolloutCancelled" + + // DeploymentConfigLabel is the name of a label used to correlate a deployment with the + DeploymentConfigLabel = "deploymentconfig" + + // DeploymentLabel is the name of a label used to correlate a deployment with the Pod created + DeploymentLabel = "deployment" + + // MaxDeploymentDurationSeconds represents the maximum duration that a deployment is allowed to run. + // This is set as the default value for ActiveDeadlineSeconds for the deployer pod. + // Currently set to 6 hours. + MaxDeploymentDurationSeconds int64 = 21600 + + // DefaultRecreateTimeoutSeconds is the default TimeoutSeconds for RecreateDeploymentStrategyParams. 
+ // Used by strategies: + DefaultRecreateTimeoutSeconds int64 = 10 * 60 + DefaultRollingTimeoutSeconds int64 = 10 * 60 + + // PreHookPodSuffix is the suffix added to all pre hook pods + PreHookPodSuffix = "hook-pre" + // MidHookPodSuffix is the suffix added to all mid hook pods + MidHookPodSuffix = "hook-mid" + // PostHookPodSuffix is the suffix added to all post hook pods + PostHookPodSuffix = "hook-post" + + // Used only internally by utils: + + // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state + // Used for specifying the reason for cancellation or failure of a deployment + DeploymentIgnorePodAnnotation = "deploy.openshift.io/deployer-pod.ignore" + DeploymentReplicasAnnotation = "openshift.io/deployment.replicas" + + DeploymentFailedUnrelatedDeploymentExists = "unrelated pod with the same name as this deployment is already running" + DeploymentFailedUnableToCreateDeployerPod = "unable to create deployer pod" + DeploymentFailedDeployerPodNoLongerExists = "deployer pod no longer exists" + + deploymentCancelledByUser = "cancelled by the user" + deploymentCancelledNewerDeploymentExists = "newer deployment was found running" +) diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go new file mode 100644 index 00000000000..8c0cf1ce06f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go @@ -0,0 +1,34 @@ +package appsutil + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" + "k8s.io/client-go/scale/scheme/autoscalingv1" +) + +// rcMapper pins preferred version to v1 and scale kind to autoscaling/v1 Scale +// this avoids putting complete server discovery (including extension APIs) in the critical path for deployments +type rcMapper struct{} + +func (rcMapper) ResourceFor(gvr schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if gvr.Group == "" && gvr.Resource == "replicationcontrollers" { + return schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}, nil + } + return schema.GroupVersionResource{}, fmt.Errorf("unknown replication controller resource: %#v", gvr) +} + +func (rcMapper) ScaleForResource(gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) { + rcGvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} + if gvr == rcGvr { + return autoscalingv1.SchemeGroupVersion.WithKind("Scale"), nil + } + return schema.GroupVersionKind{}, fmt.Errorf("unknown replication controller resource: %#v", gvr) +} + +func NewReplicationControllerScaleClient(client kubernetes.Interface) scaleclient.ScalesGetter { + return scaleclient.New(client.CoreV1().RESTClient(), rcMapper{}, dynamic.LegacyAPIPathResolverFunc, rcMapper{}) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go new file mode 100644 index 00000000000..7da474c5915 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go @@ -0,0 +1,629 @@ +package appsutil + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/watch" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + + appsv1 "github.com/openshift/api/apps/v1" + "github.com/openshift/library-go/pkg/apps/appsserialization" + "github.com/openshift/library-go/pkg/build/naming" +) + +// DeployerPodNameForDeployment returns the name of a pod for a given deployment +func DeployerPodNameForDeployment(deployment string) string { + return naming.GetPodName(deployment, "deploy") +} + +// WaitForRunningDeployerPod waits a given period of time until the deployer pod +// for given replication controller is not running. +func WaitForRunningDeployerPod(podClient corev1client.PodsGetter, rc *corev1.ReplicationController, timeout time.Duration) error { + podName := DeployerPodNameForDeployment(rc.Name) + canGetLogs := func(p *corev1.Pod) bool { + return corev1.PodSucceeded == p.Status.Phase || corev1.PodFailed == p.Status.Phase || corev1.PodRunning == p.Status.Phase + } + + fieldSelector := fields.OneTermEqualSelector("metadata.name", podName).String() + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return podClient.Pods(rc.Namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return podClient.Pods(rc.Namespace).Watch(options) + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + _, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(e watch.Event) (bool, error) { + switch e.Type { + case watch.Added, watch.Modified: + newPod, ok := e.Object.(*corev1.Pod) + if !ok { + return true, fmt.Errorf("unknown event object %#v", e.Object) + } + + return canGetLogs(newPod), nil + + case watch.Deleted: + return true, fmt.Errorf("pod got deleted %#v", e.Object) + + case watch.Error: + return true, fmt.Errorf("encountered error while watching for pod: %v", e.Object) + + default: + return true, fmt.Errorf("unexpected event type: %T", e.Type) + } + }) + return err +} + +func newControllerRef(config *appsv1.DeploymentConfig) *metav1.OwnerReference { + deploymentConfigControllerRefKind := appsv1.GroupVersion.WithKind("DeploymentConfig") + blockOwnerDeletion := true + isController := true + return &metav1.OwnerReference{ + APIVersion: deploymentConfigControllerRefKind.GroupVersion().String(), + Kind: deploymentConfigControllerRefKind.Kind, + Name: config.Name, + UID: config.UID, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} + +// MakeDeployment creates a deployment represented as a ReplicationController and based on the given DeploymentConfig. +// The controller replica count will be zero. +func MakeDeployment(config *appsv1.DeploymentConfig) (*v1.ReplicationController, error) { + // EncodeDeploymentConfig encodes config as a string using codec. 
+ encodedConfig, err := appsserialization.EncodeDeploymentConfig(config) + if err != nil { + return nil, err + } + + deploymentName := LatestDeploymentNameForConfig(config) + podSpec := config.Spec.Template.Spec.DeepCopy() + + // Fix trailing and leading whitespace in the image field + // This is needed to sanitize old deployment configs where spaces were permitted but + // kubernetes 3.7 (#47491) tightened the validation of container image fields. + for i := range podSpec.Containers { + podSpec.Containers[i].Image = strings.TrimSpace(podSpec.Containers[i].Image) + } + + controllerLabels := make(labels.Set) + for k, v := range config.Labels { + controllerLabels[k] = v + } + // Correlate the deployment with the config. + // TODO: Using the annotation constant for now since the value is correct + // but we could consider adding a new constant to the public types. + controllerLabels[appsv1.DeploymentConfigAnnotation] = config.Name + + // Ensure that pods created by this deployment controller can be safely associated back + // to the controller, and that multiple deployment controllers for the same config don't + // manipulate each others' pods. + selector := map[string]string{} + for k, v := range config.Spec.Selector { + selector[k] = v + } + selector[DeploymentConfigLabel] = config.Name + selector[DeploymentLabel] = deploymentName + + podLabels := make(labels.Set) + for k, v := range config.Spec.Template.Labels { + podLabels[k] = v + } + podLabels[DeploymentConfigLabel] = config.Name + podLabels[DeploymentLabel] = deploymentName + + podAnnotations := make(labels.Set) + for k, v := range config.Spec.Template.Annotations { + podAnnotations[k] = v + } + podAnnotations[appsv1.DeploymentAnnotation] = deploymentName + podAnnotations[appsv1.DeploymentConfigAnnotation] = config.Name + podAnnotations[appsv1.DeploymentVersionAnnotation] = strconv.FormatInt(config.Status.LatestVersion, 10) + + controllerRef := newControllerRef(config) + zero := int32(0) + deployment := &v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: config.Namespace, + Annotations: map[string]string{ + appsv1.DeploymentConfigAnnotation: config.Name, + appsv1.DeploymentEncodedConfigAnnotation: string(encodedConfig), + appsv1.DeploymentStatusAnnotation: string(appsv1.DeploymentStatusNew), + appsv1.DeploymentVersionAnnotation: strconv.FormatInt(config.Status.LatestVersion, 10), + // This is the target replica count for the new deployment. + appsv1.DesiredReplicasAnnotation: strconv.Itoa(int(config.Spec.Replicas)), + DeploymentReplicasAnnotation: strconv.Itoa(0), + }, + Labels: controllerLabels, + OwnerReferences: []metav1.OwnerReference{*controllerRef}, + }, + Spec: v1.ReplicationControllerSpec{ + // The deployment should be inactive initially + Replicas: &zero, + Selector: selector, + MinReadySeconds: config.Spec.MinReadySeconds, + Template: &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + Annotations: podAnnotations, + }, + Spec: *podSpec, + }, + }, + } + if config.Status.Details != nil && len(config.Status.Details.Message) > 0 { + deployment.Annotations[appsv1.DeploymentStatusReasonAnnotation] = config.Status.Details.Message + } + if value, ok := config.Annotations[DeploymentIgnorePodAnnotation]; ok { + deployment.Annotations[DeploymentIgnorePodAnnotation] = value + } + + return deployment, nil +} + +// SetDeploymentCondition updates the deployment to include the provided condition. 
If the condition that +// we are about to add already exists and has the same status and reason then we are not going to update. +func SetDeploymentCondition(status *appsv1.DeploymentConfigStatus, condition appsv1.DeploymentCondition) { + currentCond := GetDeploymentCondition(*status, condition.Type) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { + return + } + // Preserve lastTransitionTime if we are not switching between statuses of a condition. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + + newConditions := filterOutCondition(status.Conditions, condition.Type) + status.Conditions = append(newConditions, condition) +} + +// RemoveDeploymentCondition removes the deployment condition with the provided type. +func RemoveDeploymentCondition(status *appsv1.DeploymentConfigStatus, condType appsv1.DeploymentConditionType) { + status.Conditions = filterOutCondition(status.Conditions, condType) +} + +// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type. +func filterOutCondition(conditions []appsv1.DeploymentCondition, condType appsv1.DeploymentConditionType) []appsv1.DeploymentCondition { + var newConditions []appsv1.DeploymentCondition + for _, c := range conditions { + if c.Type == condType { + continue + } + newConditions = append(newConditions, c) + } + return newConditions +} + +// IsOwnedByConfig checks whether the provided replication controller is part of a +// deployment configuration. +// TODO: Switch to use owner references once we got those working. +func IsOwnedByConfig(obj metav1.Object) bool { + _, ok := obj.GetAnnotations()[appsv1.DeploymentConfigAnnotation] + return ok +} + +// DeploymentsForCleanup determines which deployments for a configuration are relevant for the +// revision history limit quota +func DeploymentsForCleanup(configuration *appsv1.DeploymentConfig, deployments []*v1.ReplicationController) []v1.ReplicationController { + // if the past deployment quota has been exceeded, we need to prune the oldest deployments + // until we are not exceeding the quota any longer, so we sort oldest first + sort.Sort(sort.Reverse(ByLatestVersionDesc(deployments))) + + relevantDeployments := []v1.ReplicationController{} + activeDeployment := ActiveDeployment(deployments) + if activeDeployment == nil { + // if cleanup policy is set but no successful deployments have happened, there will be + // no active deployment. We can consider all of the deployments in this case except for + // the latest one + for i := range deployments { + deployment := deployments[i] + if deploymentVersionFor(deployment) != configuration.Status.LatestVersion { + relevantDeployments = append(relevantDeployments, *deployment) + } + } + } else { + // if there is an active deployment, we need to filter out any deployments that we don't + // care about, namely the active deployment and any newer deployments + for i := range deployments { + deployment := deployments[i] + if deployment != activeDeployment && deploymentVersionFor(deployment) < deploymentVersionFor(activeDeployment) { + relevantDeployments = append(relevantDeployments, *deployment) + } + } + } + + return relevantDeployments +} + +// LabelForDeployment builds a string identifier for a Deployment. 
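
SetDeploymentCondition above returns early when the incoming condition matches the current status and reason, and preserves lastTransitionTime whenever the status itself is unchanged, so steady-state resyncs don't churn timestamps. A small sketch; the reason and message are illustrative:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        appsv1 "github.com/openshift/api/apps/v1"
        "github.com/openshift/library-go/pkg/apps/appsutil"
    )

    func main() {
        status := &appsv1.DeploymentConfigStatus{}
        cond := appsv1.DeploymentCondition{
            Type:               appsv1.DeploymentAvailable,
            Status:             corev1.ConditionTrue,
            Reason:             appsutil.NewRcAvailableReason,
            Message:            "replication controller frontend-2 is available",
            LastTransitionTime: metav1.Now(),
        }
        appsutil.SetDeploymentCondition(status, cond)

        // Setting the same status and reason again is a no-op, so the original
        // lastTransitionTime survives steady-state resyncs.
        appsutil.SetDeploymentCondition(status, cond)
        fmt.Println(len(status.Conditions)) // 1
    }
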
+func LabelForDeployment(deployment *v1.ReplicationController) string { + return fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name) +} + +// LabelForDeploymentConfig builds a string identifier for a DeploymentConfig. +func LabelForDeploymentConfig(config runtime.Object) string { + accessor, _ := meta.Accessor(config) + return fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetName()) +} + +// LatestDeploymentNameForConfig returns a stable identifier for deployment config +func LatestDeploymentNameForConfig(config *appsv1.DeploymentConfig) string { + return LatestDeploymentNameForConfigAndVersion(config.Name, config.Status.LatestVersion) +} + +// DeploymentNameForConfigVersion returns the name of the version-th deployment +// for the config that has the provided name +func DeploymentNameForConfigVersion(name string, version int64) string { + return fmt.Sprintf("%s-%d", name, version) +} + +// LatestDeploymentNameForConfigAndVersion returns a stable identifier for config based on its version. +func LatestDeploymentNameForConfigAndVersion(name string, version int64) string { + return fmt.Sprintf("%s-%d", name, version) +} + +func DeployerPodNameFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentPodAnnotation) +} + +func DeploymentConfigNameFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentConfigAnnotation) +} + +func DeploymentStatusReasonFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentStatusReasonAnnotation) +} + +func DeleteStatusReasons(rc *v1.ReplicationController) { + delete(rc.Annotations, appsv1.DeploymentStatusReasonAnnotation) + delete(rc.Annotations, appsv1.DeploymentCancelledAnnotation) +} + +func SetCancelledByUserReason(rc *v1.ReplicationController) { + rc.Annotations[appsv1.DeploymentCancelledAnnotation] = "true" + rc.Annotations[appsv1.DeploymentStatusReasonAnnotation] = deploymentCancelledByUser +} + +func SetCancelledByNewerDeployment(rc *v1.ReplicationController) { + rc.Annotations[appsv1.DeploymentCancelledAnnotation] = "true" + rc.Annotations[appsv1.DeploymentStatusReasonAnnotation] = deploymentCancelledNewerDeploymentExists +} + +// HasSynced checks if the provided deployment config has been noticed by the deployment +// config controller. +func HasSynced(dc *appsv1.DeploymentConfig, generation int64) bool { + return dc.Status.ObservedGeneration >= generation +} + +// HasChangeTrigger returns whether the provided deployment configuration has +// a config change trigger or not +func HasChangeTrigger(config *appsv1.DeploymentConfig) bool { + for _, trigger := range config.Spec.Triggers { + if trigger.Type == appsv1.DeploymentTriggerOnConfigChange { + return true + } + } + return false +} + +// HasTrigger returns whether the provided deployment configuration has any trigger +// defined or not. +func HasTrigger(config *appsv1.DeploymentConfig) bool { + return HasChangeTrigger(config) || HasImageChangeTrigger(config) +} + +// HasLastTriggeredImage returns whether all image change triggers in provided deployment +// configuration has the lastTriggerImage field set (iow. all images were updated for +// them). Returns false if deployment configuration has no image change trigger defined. 
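
HasChangeTrigger and HasTrigger above are plain scans over spec.triggers; a small sketch with a config-change trigger on an otherwise empty DeploymentConfig:

    package main

    import (
        "fmt"

        appsv1 "github.com/openshift/api/apps/v1"

        "github.com/openshift/library-go/pkg/apps/appsutil"
    )

    func main() {
        dc := &appsv1.DeploymentConfig{
            Spec: appsv1.DeploymentConfigSpec{
                Triggers: []appsv1.DeploymentTriggerPolicy{
                    {Type: appsv1.DeploymentTriggerOnConfigChange},
                },
            },
        }

        fmt.Println(appsutil.HasChangeTrigger(dc)) // true
        fmt.Println(appsutil.HasTrigger(dc))       // true: either trigger kind counts
    }
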
+func HasLastTriggeredImage(config *appsv1.DeploymentConfig) bool { + hasImageTrigger := false + for _, trigger := range config.Spec.Triggers { + if trigger.Type == appsv1.DeploymentTriggerOnImageChange { + hasImageTrigger = true + if len(trigger.ImageChangeParams.LastTriggeredImage) == 0 { + return false + } + } + } + return hasImageTrigger +} + +// IsInitialDeployment returns whether the deployment configuration is the first version +// of this configuration. +func IsInitialDeployment(config *appsv1.DeploymentConfig) bool { + return config.Status.LatestVersion == 0 +} + +// IsRollingConfig returns true if the strategy type is a rolling update. +func IsRollingConfig(config *appsv1.DeploymentConfig) bool { + return config.Spec.Strategy.Type == appsv1.DeploymentStrategyTypeRolling +} + +// ResolveFenceposts is copy from k8s deployment_utils to avoid unnecessary imports +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} + +// MaxUnavailable returns the maximum unavailable pods a rolling deployment config can take. +func MaxUnavailable(config *appsv1.DeploymentConfig) int32 { + if !IsRollingConfig(config) { + return int32(0) + } + // Error caught by validation + _, maxUnavailable, _ := ResolveFenceposts(config.Spec.Strategy.RollingParams.MaxSurge, config.Spec.Strategy.RollingParams.MaxUnavailable, config.Spec.Replicas) + return maxUnavailable +} + +// MaxSurge returns the maximum surge pods a rolling deployment config can take. +func MaxSurge(config appsv1.DeploymentConfig) int32 { + if !IsRollingConfig(&config) { + return int32(0) + } + // Error caught by validation + maxSurge, _, _ := ResolveFenceposts(config.Spec.Strategy.RollingParams.MaxSurge, config.Spec.Strategy.RollingParams.MaxUnavailable, config.Spec.Replicas) + return maxSurge +} + +// AnnotationFor returns the annotation with key for obj. +func AnnotationFor(obj runtime.Object, key string) string { + objectMeta, err := meta.Accessor(obj) + if err != nil { + return "" + } + if objectMeta == nil || reflect.ValueOf(objectMeta).IsNil() { + return "" + } + return objectMeta.GetAnnotations()[key] +} + +// ActiveDeployment returns the latest complete deployment, or nil if there is +// no such deployment. The active deployment is not always the same as the +// latest deployment. 
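
ResolveFenceposts above mirrors the upstream deployment math: the surge percentage rounds up, the unavailability percentage rounds down, and the 0/0 corner is forced to one unavailable pod so rollouts cannot wedge. A worked example, assuming ten desired replicas:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/intstr"

        "github.com/openshift/library-go/pkg/apps/appsutil"
    )

    func main() {
        maxSurge := intstr.FromString("25%")
        maxUnavailable := intstr.FromString("25%")

        // 25% of 10 is 2.5: surge rounds up to 3, unavailable rounds down to 2.
        surge, unavailable, err := appsutil.ResolveFenceposts(&maxSurge, &maxUnavailable, 10)
        if err != nil {
            panic(err)
        }
        fmt.Println(surge, unavailable) // 3 2
    }
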
+// ActiveDeployment returns the latest complete deployment, or nil if there is
+// no such deployment. The active deployment is not always the same as the
+// latest deployment.
+func ActiveDeployment(input []*v1.ReplicationController) *v1.ReplicationController {
+	var activeDeployment *v1.ReplicationController
+	var lastCompleteDeploymentVersion int64
+	for i := range input {
+		deployment := input[i]
+		deploymentVersion := DeploymentVersionFor(deployment)
+		if IsCompleteDeployment(deployment) && deploymentVersion > lastCompleteDeploymentVersion {
+			activeDeployment = deployment
+			lastCompleteDeploymentVersion = deploymentVersion
+		}
+	}
+	return activeDeployment
+}
+
+// ConfigSelector returns a label Selector which can be used to find all
+// deployments for a DeploymentConfig.
+//
+// TODO: Using the annotation constant for now since the value is correct
+// but we could consider adding a new constant to the public types.
+func ConfigSelector(name string) labels.Selector {
+	return labels.SelectorFromValidatedSet(labels.Set{appsv1.DeploymentConfigAnnotation: name})
+}
+
+// IsCompleteDeployment returns true if the passed deployment is in state complete.
+func IsCompleteDeployment(deployment runtime.Object) bool {
+	return DeploymentStatusFor(deployment) == appsv1.DeploymentStatusComplete
+}
+
+// IsFailedDeployment returns true if the passed deployment failed.
+func IsFailedDeployment(deployment runtime.Object) bool {
+	return DeploymentStatusFor(deployment) == appsv1.DeploymentStatusFailed
+}
+
+// IsTerminatedDeployment returns true if the passed deployment has terminated (either
+// complete or failed).
+func IsTerminatedDeployment(deployment runtime.Object) bool {
+	return IsCompleteDeployment(deployment) || IsFailedDeployment(deployment)
+}
+
+func IsDeploymentCancelled(deployment runtime.Object) bool {
+	value := AnnotationFor(deployment, appsv1.DeploymentCancelledAnnotation)
+	return strings.EqualFold(value, "true")
+}
+
+// DeployerPodSelector returns a label Selector which can be used to find all
+// deployer pods associated with a deployment with name.
+func DeployerPodSelector(name string) labels.Selector {
+	return labels.SelectorFromValidatedSet(labels.Set{appsv1.DeployerPodForDeploymentLabel: name})
+}
+
+func DeploymentStatusFor(deployment runtime.Object) appsv1.DeploymentStatus {
+	return appsv1.DeploymentStatus(AnnotationFor(deployment, appsv1.DeploymentStatusAnnotation))
+}
+
+func SetDeploymentLatestVersionAnnotation(rc *v1.ReplicationController, version string) {
+	if rc.Annotations == nil {
+		rc.Annotations = map[string]string{}
+	}
+	rc.Annotations[appsv1.DeploymentVersionAnnotation] = version
+}
+
+func DeploymentVersionFor(obj runtime.Object) int64 {
+	v, err := strconv.ParseInt(AnnotationFor(obj, appsv1.DeploymentVersionAnnotation), 10, 64)
+	if err != nil {
+		return -1
+	}
+	return v
+}
+
+func DeploymentNameFor(obj runtime.Object) string {
+	return AnnotationFor(obj, appsv1.DeploymentAnnotation)
+}
+
+// deploymentVersionFor is an unexported alias kept for internal callers; it
+// delegates to DeploymentVersionFor instead of duplicating its body.
+func deploymentVersionFor(obj runtime.Object) int64 {
+	return DeploymentVersionFor(obj)
+}
+
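+// exampleAnnotationRoundTrip is an illustrative sketch, not part of the
+// upstream change: deployment state for a ReplicationController is carried
+// entirely in annotations, so setting the version annotation and reading it
+// back through DeploymentVersionFor round-trips through the same keys. The
+// ReplicationController literal is hypothetical.
+func exampleAnnotationRoundTrip() {
+	rc := &v1.ReplicationController{}
+	SetDeploymentLatestVersionAnnotation(rc, "42")
+	fmt.Println(DeploymentVersionFor(rc)) // 42
+
+	rc.Annotations[appsv1.DeploymentStatusAnnotation] = string(appsv1.DeploymentStatusComplete)
+	fmt.Println(IsCompleteDeployment(rc)) // true
+}
+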
+// LatestDeploymentInfo returns info about the latest deployment for a config,
+// or nil if there is no latest deployment. The latest deployment is not
+// always the same as the active deployment.
+func LatestDeploymentInfo(config *appsv1.DeploymentConfig, deployments []*v1.ReplicationController) (bool, *v1.ReplicationController) {
+	if config.Status.LatestVersion == 0 || len(deployments) == 0 {
+		return false, nil
+	}
+	sort.Sort(ByLatestVersionDesc(deployments))
+	candidate := deployments[0]
+	return deploymentVersionFor(candidate) == config.Status.LatestVersion, candidate
+}
+
+// GetDeploymentCondition returns the condition with the provided type.
+func GetDeploymentCondition(status appsv1.DeploymentConfigStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
+	for i := range status.Conditions {
+		c := status.Conditions[i]
+		if c.Type == condType {
+			return &c
+		}
+	}
+	return nil
+}
+
+// GetReplicaCountForDeployments returns the sum of all replicas for the
+// given deployments.
+func GetReplicaCountForDeployments(deployments []*v1.ReplicationController) int32 {
+	totalReplicaCount := int32(0)
+	for _, deployment := range deployments {
+		count := deployment.Spec.Replicas
+		if count == nil {
+			continue
+		}
+		totalReplicaCount += *count
+	}
+	return totalReplicaCount
+}
+
+// GetStatusReplicaCountForDeployments returns the sum of the replicas reported in the
+// status of the given deployments.
+func GetStatusReplicaCountForDeployments(deployments []*v1.ReplicationController) int32 {
+	totalReplicaCount := int32(0)
+	for _, deployment := range deployments {
+		totalReplicaCount += deployment.Status.Replicas
+	}
+	return totalReplicaCount
+}
+
+// GetReadyReplicaCountForReplicationControllers returns the number of ready pods corresponding to
+// the given replication controllers.
+func GetReadyReplicaCountForReplicationControllers(replicationControllers []*v1.ReplicationController) int32 {
+	totalReadyReplicas := int32(0)
+	for _, rc := range replicationControllers {
+		if rc != nil {
+			totalReadyReplicas += rc.Status.ReadyReplicas
+		}
+	}
+	return totalReadyReplicas
+}
+
+// GetAvailableReplicaCountForReplicationControllers returns the number of available pods corresponding to
+// the given replication controllers.
+func GetAvailableReplicaCountForReplicationControllers(replicationControllers []*v1.ReplicationController) int32 {
+	totalAvailableReplicas := int32(0)
+	for _, rc := range replicationControllers {
+		if rc != nil {
+			totalAvailableReplicas += rc.Status.AvailableReplicas
+		}
+	}
+	return totalAvailableReplicas
+}
+
+// HasImageChangeTrigger returns whether the provided deployment configuration has
+// an image change trigger.
+func HasImageChangeTrigger(config *appsv1.DeploymentConfig) bool {
+	for _, trigger := range config.Spec.Triggers {
+		if trigger.Type == appsv1.DeploymentTriggerOnImageChange {
+			return true
+		}
+	}
+	return false
+}
+
+// CanTransitionPhase returns whether it is allowed to go from the current to the next phase.
+func CanTransitionPhase(current, next appsv1.DeploymentStatus) bool { + switch current { + case appsv1.DeploymentStatusNew: + switch next { + case appsv1.DeploymentStatusPending, + appsv1.DeploymentStatusRunning, + appsv1.DeploymentStatusFailed, + appsv1.DeploymentStatusComplete: + return true + } + case appsv1.DeploymentStatusPending: + switch next { + case appsv1.DeploymentStatusRunning, + appsv1.DeploymentStatusFailed, + appsv1.DeploymentStatusComplete: + return true + } + case appsv1.DeploymentStatusRunning: + switch next { + case appsv1.DeploymentStatusFailed, appsv1.DeploymentStatusComplete: + return true + } + } + return false +} + +type ByLatestVersionAsc []*v1.ReplicationController + +func (d ByLatestVersionAsc) Len() int { return len(d) } +func (d ByLatestVersionAsc) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d ByLatestVersionAsc) Less(i, j int) bool { + return DeploymentVersionFor(d[i]) < DeploymentVersionFor(d[j]) +} + +// ByLatestVersionDesc sorts deployments by LatestVersion descending. +type ByLatestVersionDesc []*v1.ReplicationController + +func (d ByLatestVersionDesc) Len() int { return len(d) } +func (d ByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d ByLatestVersionDesc) Less(i, j int) bool { + return DeploymentVersionFor(d[j]) < DeploymentVersionFor(d[i]) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go new file mode 100644 index 00000000000..f4298e1c78f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go @@ -0,0 +1,425 @@ +package appsutil + +import ( + "reflect" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + appsv1 "github.com/openshift/api/apps/v1" +) + +func TestPodName(t *testing.T) { + deployment := &corev1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testName", + }, + } + expected := "testName-deploy" + actual := DeployerPodNameForDeployment(deployment.Name) + if expected != actual { + t.Errorf("Unexpected pod name for deployment. 
Expected: %s Got: %s", expected, actual) + } +} + +func TestCanTransitionPhase(t *testing.T) { + tests := []struct { + name string + current, next appsv1.DeploymentStatus + expected bool + }{ + { + name: "New->New", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "New->Pending", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusPending, + expected: true, + }, + { + name: "New->Running", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusRunning, + expected: true, + }, + { + name: "New->Complete", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusComplete, + expected: true, + }, + { + name: "New->Failed", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusFailed, + expected: true, + }, + { + name: "Pending->New", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Pending->Pending", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Pending->Running", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusRunning, + expected: true, + }, + { + name: "Pending->Failed", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusFailed, + expected: true, + }, + { + name: "Pending->Complete", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusComplete, + expected: true, + }, + { + name: "Running->New", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Running->Pending", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Running->Running", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusRunning, + expected: false, + }, + { + name: "Running->Failed", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusFailed, + expected: true, + }, + { + name: "Running->Complete", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusComplete, + expected: true, + }, + { + name: "Complete->New", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Complete->Pending", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Complete->Running", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusRunning, + expected: false, + }, + { + name: "Complete->Failed", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusFailed, + expected: false, + }, + { + name: "Complete->Complete", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusComplete, + expected: false, + }, + { + name: "Failed->New", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Failed->Pending", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Failed->Running", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusRunning, + expected: false, + }, + { + name: "Failed->Complete", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusComplete, + expected: false, + }, + { + name: "Failed->Failed", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusFailed, 
+ expected: false, + }, + } + + for _, test := range tests { + got := CanTransitionPhase(test.current, test.next) + if got != test.expected { + t.Errorf("%s: expected %t, got %t", test.name, test.expected, got) + } + } +} + +var ( + now = metav1.Now() + later = metav1.Time{Time: now.Add(time.Minute)} + earlier = metav1.Time{Time: now.Add(-time.Minute)} + + condProgressing = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + LastTransitionTime: now, + } + } + + condProgressingDifferentTime = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + LastTransitionTime: later, + } + } + + condProgressingDifferentReason = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + LastTransitionTime: later, + Reason: NewReplicationControllerReason, + } + } + + condNotProgressing = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionFalse, + LastUpdateTime: earlier, + LastTransitionTime: earlier, + } + } + + condAvailable = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + } + } +) + +func TestGetCondition(t *testing.T) { + exampleStatus := func() appsv1.DeploymentConfigStatus { + return appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{condProgressing(), condAvailable()}, + } + } + + tests := []struct { + name string + + status appsv1.DeploymentConfigStatus + condType appsv1.DeploymentConditionType + condStatus corev1.ConditionStatus + + expected bool + }{ + { + name: "condition exists", + + status: exampleStatus(), + condType: appsv1.DeploymentAvailable, + + expected: true, + }, + { + name: "condition does not exist", + + status: exampleStatus(), + condType: appsv1.DeploymentReplicaFailure, + + expected: false, + }, + } + + for _, test := range tests { + cond := GetDeploymentCondition(test.status, test.condType) + exists := cond != nil + if exists != test.expected { + t.Errorf("%s: expected condition to exist: %t, got: %t", test.name, test.expected, exists) + } + } +} + +func TestSetCondition(t *testing.T) { + tests := []struct { + name string + + status *appsv1.DeploymentConfigStatus + cond appsv1.DeploymentCondition + + expectedStatus *appsv1.DeploymentConfigStatus + }{ + { + name: "set for the first time", + + status: &appsv1.DeploymentConfigStatus{}, + cond: condAvailable(), + + expectedStatus: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condAvailable(), + }, + }, + }, + { + name: "simple set", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), + }, + }, + cond: condAvailable(), + + expectedStatus: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), condAvailable(), + }, + }, + }, + { + name: "replace if status changes", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condNotProgressing(), + }, + }, + cond: condProgressing(), + + expectedStatus: &appsv1.DeploymentConfigStatus{Conditions: []appsv1.DeploymentCondition{condProgressing()}}, + }, + { + name: "replace if reason changes", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: 
[]appsv1.DeploymentCondition{ + condProgressing(), + }, + }, + cond: condProgressingDifferentReason(), + + expectedStatus: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + // Note that LastTransitionTime stays the same. + LastTransitionTime: now, + // Only the reason changes. + Reason: NewReplicationControllerReason, + }, + }, + }, + }, + { + name: "don't replace if status and reason don't change", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), + }, + }, + cond: condProgressingDifferentTime(), + + expectedStatus: &appsv1.DeploymentConfigStatus{Conditions: []appsv1.DeploymentCondition{condProgressing()}}, + }, + } + + for _, test := range tests { + t.Logf("running test %q", test.name) + SetDeploymentCondition(test.status, test.cond) + if !reflect.DeepEqual(test.status, test.expectedStatus) { + t.Errorf("expected status: %v, got: %v", test.expectedStatus, test.status) + } + } +} + +func TestRemoveCondition(t *testing.T) { + exampleStatus := func() *appsv1.DeploymentConfigStatus { + return &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{condProgressing(), condAvailable()}, + } + } + + tests := []struct { + name string + + status *appsv1.DeploymentConfigStatus + condType appsv1.DeploymentConditionType + + expectedStatus *appsv1.DeploymentConfigStatus + }{ + { + name: "remove from empty status", + + status: &appsv1.DeploymentConfigStatus{}, + condType: appsv1.DeploymentProgressing, + + expectedStatus: &appsv1.DeploymentConfigStatus{}, + }, + { + name: "simple remove", + + status: &appsv1.DeploymentConfigStatus{Conditions: []appsv1.DeploymentCondition{condProgressing()}}, + condType: appsv1.DeploymentProgressing, + + expectedStatus: &appsv1.DeploymentConfigStatus{}, + }, + { + name: "doesn't remove anything", + + status: exampleStatus(), + condType: appsv1.DeploymentReplicaFailure, + + expectedStatus: exampleStatus(), + }, + } + + for _, test := range tests { + RemoveDeploymentCondition(test.status, test.condType) + if !reflect.DeepEqual(test.status, test.expectedStatus) { + t.Errorf("%s: expected status: %v, got: %v", test.name, test.expectedStatus, test.status) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/assets.go b/vendor/github.com/openshift/library-go/pkg/assets/assets.go new file mode 100644 index 00000000000..5c26928676b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/assets.go @@ -0,0 +1,150 @@ +package assets + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/util/errors" +) + +type Permission os.FileMode + +const ( + PermissionDirectoryDefault Permission = 0755 + PermissionFileDefault Permission = 0644 + PermissionFileRestricted Permission = 0600 +) + +// Asset defines a single static asset. +type Asset struct { + Name string + FilePermission Permission + Data []byte +} + +// Assets is a list of assets. +type Assets []Asset + +// New walks through a directory recursively and renders each file as asset. Only those files +// are rendered that make all predicates true. +func New(dir string, data interface{}, predicates ...FileInfoPredicate) (Assets, error) { + files, err := LoadFilesRecursively(dir, predicates...) 
+	if err != nil {
+		return nil, err
+	}
+
+	var as Assets
+	var errs []error
+	for path, bs := range files {
+		a, err := assetFromTemplate(path, bs, data)
+		if err != nil {
+			errs = append(errs, fmt.Errorf("failed to render %q: %v", path, err))
+			continue
+		}
+
+		as = append(as, *a)
+	}
+
+	if len(errs) > 0 {
+		return nil, errors.NewAggregate(errs)
+	}
+
+	return as, nil
+}
+
+// WriteFiles writes the assets to the specified path.
+func (as Assets) WriteFiles(path string) error {
+	if err := os.MkdirAll(path, os.FileMode(PermissionDirectoryDefault)); err != nil {
+		return err
+	}
+	for _, asset := range as {
+		target := filepath.Join(path, asset.Name)
+		// Stat the target file itself; a nil error means it already exists
+		// and its content is about to be replaced.
+		if _, err := os.Stat(target); err == nil {
+			fmt.Printf("WARNING: File %s already exists, content will be replaced\n", target)
+		}
+		if err := asset.WriteFile(path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WriteFile writes a single asset into the specified path.
+func (a Asset) WriteFile(path string) error {
+	f := filepath.Join(path, a.Name)
+	perms := PermissionFileDefault
+	if err := os.MkdirAll(filepath.Dir(f), os.FileMode(PermissionDirectoryDefault)); err != nil {
+		return err
+	}
+	if a.FilePermission != 0 {
+		perms = a.FilePermission
+	}
+	fmt.Printf("Writing asset: %s\n", f)
+	return ioutil.WriteFile(f, a.Data, os.FileMode(perms))
+}
+
+// MustCreateAssetFromTemplate processes the given template with the provided
+// config and returns an asset, panicking on error.
+func MustCreateAssetFromTemplate(name string, template []byte, config interface{}) Asset {
+	asset, err := assetFromTemplate(name, template, config)
+	if err != nil {
+		panic(err)
+	}
+	return *asset
+}
+
+func assetFromTemplate(name string, tb []byte, data interface{}) (*Asset, error) {
+	bs, err := renderFile(name, tb, data)
+	if err != nil {
+		return nil, err
+	}
+	return &Asset{Name: name, Data: bs}, nil
+}
+
+type FileInfoPredicate func(os.FileInfo) bool
+
+// OnlyYaml is a predicate for LoadFilesRecursively that filters out non-YAML files.
+func OnlyYaml(info os.FileInfo) bool {
+	return strings.HasSuffix(info.Name(), ".yaml") || strings.HasSuffix(info.Name(), ".yml")
+}
+
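+// exampleRenderAndWrite is an illustrative sketch, not part of the upstream
+// change: New renders every YAML template found under a directory with the
+// supplied data, and WriteFiles persists the rendered assets, creating parent
+// directories with the default permissions above. The directory paths and
+// template data here are hypothetical.
+func exampleRenderAndWrite() error {
+	templateData := map[string]string{"Namespace": "openshift-example"}
+	rendered, err := New("manifests/templates", templateData, OnlyYaml)
+	if err != nil {
+		return err
+	}
+	return rendered.WriteFiles("/tmp/rendered-manifests")
+}
+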
+// LoadFilesRecursively returns a map from relative path names to file content.
+func LoadFilesRecursively(dir string, predicates ...FileInfoPredicate) (map[string][]byte, error) {
+	files := map[string][]byte{}
+	err := filepath.Walk(dir,
+		func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+			if info.IsDir() {
+				return nil
+			}
+
+			for _, p := range predicates {
+				if !p(info) {
+					return nil
+				}
+			}
+
+			bs, err := ioutil.ReadFile(path)
+			if err != nil {
+				return err
+			}
+
+			// make path relative to dir
+			rel, err := filepath.Rel(dir, path)
+			if err != nil {
+				return err
+			}
+
+			files[rel] = bs
+			return nil
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return files, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/assets/assets_test.go b/vendor/github.com/openshift/library-go/pkg/assets/assets_test.go
new file mode 100644
index 00000000000..6e9f3bd11d3
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/assets/assets_test.go
@@ -0,0 +1,61 @@
+package assets
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+func TestAsset_WriteFile(t *testing.T) {
+	sampleAssets := Assets{
+		{
+			Name: "test-default",
+			Data: []byte("test"),
+		},
+		{
+			Name:           "test-restricted",
+			FilePermission: PermissionFileRestricted,
+			Data:           []byte("test"),
+		},
+		{
+			Name:           "test-default-explicit",
+			FilePermission: PermissionFileDefault,
+			Data:           []byte("test"),
+		},
+	}
+
+	assetDir, err := ioutil.TempDir("", "asset-test")
+	if err != nil {
+		t.Fatalf("unexpected error: %v", err)
+	}
+	defer os.RemoveAll(assetDir)
+
+	if err := sampleAssets.WriteFiles(assetDir); err != nil {
+		t.Fatalf("unexpected error when writing files: %v", err)
+	}
+
+	if s, err := os.Stat(filepath.Join(assetDir, sampleAssets[0].Name)); err != nil {
+		t.Fatalf("expected file to exist, got: %v", err)
+	} else {
+		if s.Mode() != os.FileMode(PermissionFileDefault) {
+			t.Errorf("expected file to have %s permissions, got %s", os.FileMode(PermissionFileDefault), s.Mode())
+		}
+	}
+
+	if s, err := os.Stat(filepath.Join(assetDir, sampleAssets[1].Name)); err != nil {
+		t.Fatalf("expected file to exist, got: %v", err)
+	} else {
+		if s.Mode() != os.FileMode(sampleAssets[1].FilePermission) {
+			t.Errorf("expected file to have %s permissions, got %s", os.FileMode(sampleAssets[1].FilePermission), s.Mode())
+		}
+	}
+
+	if s, err := os.Stat(filepath.Join(assetDir, sampleAssets[2].Name)); err != nil {
+		t.Fatalf("expected file to exist, got: %v", err)
+	} else {
+		if s.Mode() != os.FileMode(sampleAssets[2].FilePermission) {
+			t.Errorf("expected file to have %s permissions, got %s", os.FileMode(sampleAssets[2].FilePermission), s.Mode())
+		}
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS b/vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS
new file mode 100644
index 00000000000..f9d8e59e4b1
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/assets/create/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+  - mfojtik
+approvers:
+  - mfojtik
\ No newline at end of file
diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go
new file mode 100644
index 00000000000..dad5375337b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go
@@ -0,0 +1,322 @@
+package create
+
+import (
+	"bytes"
+	"context"
+	"os"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + ktesting "k8s.io/client-go/testing" + + "github.com/openshift/library-go/pkg/assets" +) + +func init() { + fetchLatestDiscoveryInfoFn = func(dc *discovery.DiscoveryClient) (meta.RESTMapper, error) { + resourcesForEnsureMutex.Lock() + defer resourcesForEnsureMutex.Unlock() + return restmapper.NewDiscoveryRESTMapper(resourcesForEnsure), nil + } + newClientsFn = func(config *rest.Config) (dynamic.Interface, *discovery.DiscoveryClient, error) { + fakeScheme := runtime.NewScheme() + // TODO: This is a workaround for dynamic fake client bug where the List kind is enforced and duplicated in object reactor. + fakeScheme.AddKnownTypeWithName(schema.GroupVersionKind{Version: "v1", Kind: "ListList"}, &unstructured.UnstructuredList{}) + dynamicClient := dynamicfake.NewSimpleDynamicClient(fakeScheme) + return dynamicClient, nil, nil + } +} + +var ( + resources = []*restmapper.APIGroupResources{ + { + Group: metav1.APIGroup{ + Name: "kubeapiserver.operator.openshift.io", + Versions: []metav1.GroupVersionForDiscovery{ + {Version: "v1alpha1"}, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{Version: "v1alpha1"}, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v1alpha1": { + {Name: "kubeapiserveroperatorconfigs", Namespaced: false, Kind: "KubeAPIServerOperatorConfig"}, + }, + }, + }, + { + Group: metav1.APIGroup{ + Name: "apiextensions.k8s.io", + Versions: []metav1.GroupVersionForDiscovery{ + {Version: "v1beta1"}, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{Version: "v1beta1"}, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v1beta1": { + {Name: "customresourcedefinitions", Namespaced: false, Kind: "CustomResourceDefinition"}, + }, + }, + }, + { + Group: metav1.APIGroup{ + Name: "", + Versions: []metav1.GroupVersionForDiscovery{ + {Version: "v1"}, + }, + PreferredVersion: metav1.GroupVersionForDiscovery{Version: "v1"}, + }, + VersionedResources: map[string][]metav1.APIResource{ + "v1": { + {Name: "namespaces", Namespaced: false, Kind: "Namespace"}, + {Name: "configmaps", Namespaced: true, Kind: "ConfigMap"}, + {Name: "secrets", Namespaced: true, Kind: "Secret"}, + }, + }, + }, + } + + // Copy this to not overlap with other tests if ran in parallel + resourcesForEnsure = resources + resourcesForEnsureMutex sync.Mutex +) + +func TestEnsureManifestsCreated(t *testing.T) { + // Success + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + err := EnsureManifestsCreated(ctx, "testdata", nil, CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + // Missing discovery info for kubeapiserverconfig + out := &bytes.Buffer{} + operatorResource := resourcesForEnsure[0] + resourcesForEnsure = resourcesForEnsure[1:] + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err = EnsureManifestsCreated(ctx, "testdata", nil, CreateOptions{Verbose: true, StdErr: out}) + if err == nil { + t.Fatal("expected error creating kubeapiserverconfig resource, got none") + } + if !strings.Contains(out.String(), "unable to get REST mapping") { + t.Fatalf("expected error logged to output when verbose is on, got: %s\n", out.String()) + } + + // Should succeed on updated discovery info + go func() { + time.Sleep(2 * time.Second) + resourcesForEnsureMutex.Lock() + defer 
resourcesForEnsureMutex.Unlock()
+		resourcesForEnsure = append(resourcesForEnsure, operatorResource)
+	}()
+	out = &bytes.Buffer{}
+	ctx, cancel = context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	err = EnsureManifestsCreated(ctx, "testdata", nil, CreateOptions{Verbose: true, StdErr: out})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !strings.Contains(out.String(), `no matches for kind "KubeAPIServerOperatorConfig"`) {
+		t.Fatalf("expected error logged to output when verbose is on, got: %s\n", out.String())
+	}
+	if !strings.Contains(out.String(), `Created "0000_10_kube-apiserver-operator_01_config.crd.yaml" customresourcedefinitions.v1beta1.apiextensions.k8s.io`) {
+		t.Fatalf("expected success logged to output when verbose is on, got: %s\n", out.String())
+	}
+}
+
+func TestCreate(t *testing.T) {
+	ctx := context.Background()
+
+	resourcesWithoutKubeAPIServer := resources[1:]
+	testConfigMap := &unstructured.Unstructured{}
+	testConfigMap.SetGroupVersionKind(schema.GroupVersionKind{
+		Version: "v1",
+		Kind:    "ConfigMap",
+	})
+	testConfigMap.SetName("aggregator-client-ca")
+	testConfigMap.SetNamespace("openshift-kube-apiserver")
+
+	testOperatorConfig := &unstructured.Unstructured{}
+	testOperatorConfig.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "kubeapiserver.operator.openshift.io",
+		Version: "v1alpha1",
+		Kind:    "KubeAPIServerOperatorConfig",
+	})
+	testOperatorConfig.SetName("instance")
+
+	testOperatorConfigWithStatus := &unstructured.Unstructured{}
+	testOperatorConfigWithStatus.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "kubeapiserver.operator.openshift.io",
+		Version: "v1alpha1",
+		Kind:    "KubeAPIServerOperatorConfig",
+	})
+	testOperatorConfigWithStatus.SetName("instance")
+	testOperatorConfigStatusVal := make(map[string]interface{})
+	testOperatorConfigStatusVal["initializedValue"] = "something before"
+	unstructured.SetNestedField(testOperatorConfigWithStatus.Object, testOperatorConfigStatusVal, "status")
+
+	tests := []struct {
+		name              string
+		discovery         []*restmapper.APIGroupResources
+		expectError       bool
+		expectFailedCount int
+		expectReload      bool
+		existingObjects   []runtime.Object
+		evalActions       func(*testing.T, []ktesting.Action)
+	}{
+		{
+			name:      "create all resources",
+			discovery: resources,
+		},
+		{
+			name:              "fail to create kube apiserver operator config",
+			discovery:         resourcesWithoutKubeAPIServer,
+			expectFailedCount: 2,
+			expectError:       true,
+			expectReload:      true,
+		},
+		{
+			name:            "create all resources",
+			discovery:       resources,
+			existingObjects: []runtime.Object{testConfigMap},
+		},
+		{
+			name:            "create all resources",
+			discovery:       resources,
+			existingObjects: []runtime.Object{testOperatorConfig},
+			evalActions: func(t *testing.T, actions []ktesting.Action) {
+				if got, exp := len(actions), 8; got != exp {
+					t.Errorf("expected %d actions, found %d", exp, got)
+					return
+				}
+
+				ups, ok := actions[6].(ktesting.UpdateAction)
+				if !ok {
+					t.Errorf("expecting Update action for actions[6], got %T", actions[6])
+					return
+				}
+				if got, exp := ups.GetSubresource(), "status"; got != exp {
+					t.Errorf("expecting the subresource to be %q, got %q", exp, got)
+					return
+				}
+			},
+		},
+		{
+			name:            "create all resources",
+			discovery:       resources,
+			existingObjects: []runtime.Object{testOperatorConfigWithStatus},
+			evalActions: func(t *testing.T, actions []ktesting.Action) {
+				if got, exp := len(actions), 7; got != exp {
+					t.Errorf("expected %d actions, found %d", exp, got)
+					return
+				}
+			},
+		},
+	}
+
+	fakeScheme := runtime.NewScheme()
+	// TODO: This
is a workaround for dynamic fake client bug where the List kind is enforced and duplicated in object reactor. + fakeScheme.AddKnownTypeWithName(schema.GroupVersionKind{Version: "v1", Kind: "ListList"}, &unstructured.UnstructuredList{}) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + manifests, err := load("testdata", CreateOptions{}) + if err != nil { + t.Fatal(err) + } + + dynamicClient := dynamicfake.NewSimpleDynamicClient(fakeScheme, tc.existingObjects...) + restMapper := restmapper.NewDiscoveryRESTMapper(tc.discovery) + + err, reload := create(ctx, manifests, dynamicClient, restMapper, CreateOptions{Verbose: true, StdErr: os.Stderr}) + if tc.expectError && err == nil { + t.Errorf("expected error, got no error") + return + } + if !tc.expectError && err != nil { + t.Errorf("unexpected error: %v", err) + return + } + if tc.expectReload && !reload { + t.Errorf("expected reload, got none") + return + } + if !tc.expectReload && reload { + t.Errorf("unexpected reload, got one") + return + } + if len(manifests) != tc.expectFailedCount { + t.Errorf("expected %d failed manifests, got %d", tc.expectFailedCount, len(manifests)) + return + } + if tc.evalActions != nil { + tc.evalActions(t, dynamicClient.Actions()) + } + }) + + } +} + +func TestLoad(t *testing.T) { + tests := []struct { + name string + options CreateOptions + assetDir string + expectedManifestCount int + expectError bool + }{ + { + name: "read all manifests", + assetDir: "testdata", + expectedManifestCount: 6, + }, + { + name: "handle missing dir", + assetDir: "foo", + expectError: true, + }, + { + name: "read only 00_ prefixed files", + options: CreateOptions{ + Filters: []assets.FileInfoPredicate{ + func(info os.FileInfo) bool { + return strings.HasPrefix(info.Name(), "00") + }, + }, + }, + assetDir: "testdata", + expectedManifestCount: 2, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result, err := load(tc.assetDir, tc.options) + if tc.expectError && err == nil { + t.Errorf("expected error, got no error") + return + } + if !tc.expectError && err != nil { + t.Errorf("unexpected error: %v", err) + return + } + if len(result) != tc.expectedManifestCount { + t.Errorf("expected %d manifests loaded, got %d", tc.expectedManifestCount, len(result)) + return + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go new file mode 100644 index 00000000000..bf2bdf1a8d6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go @@ -0,0 +1,273 @@ +package create + +import ( + "context" + "fmt" + "io" + "os" + "sort" + "strings" + "time" + + "github.com/ghodss/yaml" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" + + "github.com/openshift/library-go/pkg/assets" +) + +// CreateOptions allow to specify additional create options. +type CreateOptions struct { + // Filters allows to filter which files we will read from disk. + // Multiple filters can be specified, in that case only files matching all filters will be returned. 
+	Filters []assets.FileInfoPredicate
+
+	// Verbose if true will print out extra messages for debugging
+	Verbose bool
+
+	// StdErr allows overriding the standard error output for printing verbose messages.
+	// If not set, os.Stderr is used.
+	StdErr io.Writer
+}
+
+// EnsureManifestsCreated ensures that all resource manifests from the specified directory are created.
+// This function will keep trying to create the remaining resources in the manifest list after an error
+// occurs, retrying until no errors are reported or the timeout is hit.
+// Pass the context to indicate how much time you are willing to wait until all resources are created.
+func EnsureManifestsCreated(ctx context.Context, manifestDir string, restConfig *rest.Config, options CreateOptions) error {
+	client, dc, err := newClientsFn(restConfig)
+	if err != nil {
+		return err
+	}
+
+	manifests, err := load(manifestDir, options)
+	if err != nil {
+		return err
+	}
+
+	if options.Verbose && options.StdErr == nil {
+		options.StdErr = os.Stderr
+	}
+
+	// The default QPS in the client (when not specified) is 5 requests per second.
+	// This specifies the interval between "create-all-resources" attempts; no need to make this configurable.
+	interval := 200 * time.Millisecond
+
+	// Retry creation until no errors are returned or the timeout is hit.
+	var (
+		lastCreateError      error
+		retryCount           int
+		mapper               meta.RESTMapper
+		needDiscoveryRefresh bool = true
+	)
+	err = wait.PollImmediateUntil(interval, func() (bool, error) {
+		retryCount++
+		// If we get a REST mapper error, we need to pull updated discovery info from the API server.
+		if needDiscoveryRefresh {
+			mapper, err = fetchLatestDiscoveryInfoFn(dc)
+			if err != nil {
+				if options.Verbose {
+					fmt.Fprintf(options.StdErr, "[#%d] failed to fetch discovery: %s\n", retryCount, err)
+				}
+				return false, nil
+			}
+		}
+		err, needDiscoveryRefresh = create(ctx, manifests, client, mapper, options)
+		if err == nil {
+			lastCreateError = nil
+			return true, nil
+		}
+		if ctx.Err() == nil || lastCreateError == nil {
+			lastCreateError = err
+		}
+		if options.Verbose {
+			fmt.Fprintf(options.StdErr, "[#%d] %s\n", retryCount, err)
+		}
+		return false, nil
+	}, ctx.Done())
+
+	// Return the last observed set of errors from the create process instead of the timeout error.
+	if lastCreateError != nil {
+		return lastCreateError
+	}
+
+	return err
+}
+
+// newClientsFn allows overriding newClients in unit tests.
+var newClientsFn = newClients
+
+func newClients(config *rest.Config) (dynamic.Interface, *discovery.DiscoveryClient, error) {
+	client, err := dynamic.NewForConfig(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// TODO: We can use cacheddiscovery.NewMemCacheClient(dc) and then call .Invalidate() instead of fetchLatestDiscoveryInfo.
+	// It will require more work in unit tests though.
+	dc, err := discovery.NewDiscoveryClientForConfig(config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return client, dc, nil
+}
+
+// fetchLatestDiscoveryInfoFn allows overriding fetchLatestDiscoveryInfo in unit tests.
+var fetchLatestDiscoveryInfoFn = fetchLatestDiscoveryInfo
+
+func fetchLatestDiscoveryInfo(dc *discovery.DiscoveryClient) (meta.RESTMapper, error) {
+	gr, err := restmapper.GetAPIGroupResources(dc)
+	if err != nil {
+		return nil, err
+	}
+	return restmapper.NewDiscoveryRESTMapper(gr), nil
+}
+
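+// exampleEnsure is an illustrative sketch, not part of the upstream change:
+// a caller bounds the overall retry loop with a context deadline and lets
+// EnsureManifestsCreated keep retrying (and refreshing discovery) until all
+// manifests in the directory are created or the deadline hits. The manifest
+// directory name here is hypothetical.
+func exampleEnsure(restConfig *rest.Config) error {
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
+	defer cancel()
+	return EnsureManifestsCreated(ctx, "bootstrap-manifests", restConfig, CreateOptions{
+		Verbose: true,
+		StdErr:  os.Stderr,
+	})
+}
+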
+// create will attempt to create all provided manifests using the dynamic client.
+// It will mutate the manifests argument in case the create succeeded for a given manifest. When all
+// manifests are successfully created, the resulting manifests argument should be empty.
+func create(ctx context.Context, manifests map[string]*unstructured.Unstructured, client dynamic.Interface, mapper meta.RESTMapper, options CreateOptions) (error, bool) {
+	sortedManifestPaths := []string{}
+	for key := range manifests {
+		sortedManifestPaths = append(sortedManifestPaths, key)
+	}
+	sort.Strings(sortedManifestPaths)
+
+	// Record all errors for the given manifest path (so when we report errors, users can see what manifest failed).
+	errs := map[string]error{}
+
+	// In case we fail to find a REST mapping for a resource, force a fetch of the updated discovery on the next run.
+	reloadDiscovery := false
+
+	for _, path := range sortedManifestPaths {
+		select {
+		case <-ctx.Done():
+			return ctx.Err(), false
+		default:
+		}
+
+		gvk := manifests[path].GetObjectKind().GroupVersionKind()
+		mappings, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+		if err != nil {
+			errs[path] = fmt.Errorf("unable to get REST mapping for %q: %v", path, err)
+			reloadDiscovery = true
+			continue
+		}
+
+		var resource dynamic.ResourceInterface
+		if mappings.Scope.Name() == meta.RESTScopeNameRoot {
+			resource = client.Resource(mappings.Resource)
+		} else {
+			resource = client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace())
+		}
+		resourceString := mappings.Resource.Resource + "." + mappings.Resource.Version + "." + mappings.Resource.Group + "/" + manifests[path].GetName() + " -n " + manifests[path].GetNamespace()
+
+		incluster, err := resource.Create(manifests[path], metav1.CreateOptions{})
+
+		if err == nil && options.Verbose {
+			fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString)
+		}
+
+		// Resource already exists means we already succeeded.
+		// This should never happen, as we remove already created items from the manifest list, unless the resource existed beforehand.
+		if kerrors.IsAlreadyExists(err) {
+			if options.Verbose {
+				fmt.Fprintf(options.StdErr, "Skipped %q %s as it already exists\n", path, resourceString)
+			}
+			incluster, err = resource.Get(manifests[path].GetName(), metav1.GetOptions{})
+			if err != nil {
+				if options.Verbose {
+					fmt.Fprintf(options.StdErr, "Failed to get already existing %q %s: %v\n", path, resourceString, err)
+				}
+				errs[path] = fmt.Errorf("failed to get %s: %v", resourceString, err)
+				continue
+			}
+		}
+
+		if err != nil {
+			if options.Verbose {
+				fmt.Fprintf(options.StdErr, "Failed to create %q %s: %v\n", path, resourceString, err)
+			}
+			errs[path] = fmt.Errorf("failed to create %s: %v", resourceString, err)
+			continue
+		}
+
+		if _, ok := manifests[path].Object["status"]; ok {
+			_, found := incluster.Object["status"]
+			if !found {
+				incluster.Object["status"] = manifests[path].Object["status"]
+				incluster, err = resource.UpdateStatus(incluster, metav1.UpdateOptions{})
+				if err != nil && !kerrors.IsNotFound(err) {
+					if options.Verbose {
+						fmt.Fprintf(options.StdErr, "Failed to update status for the %q %s: %v\n", path, resourceString, err)
+					}
+					errs[path] = fmt.Errorf("failed to update status for %s: %v", resourceString, err)
+					continue
+				}
+				if err == nil && options.Verbose {
+					fmt.Fprintf(options.StdErr, "Updated status for %q %s\n", path, resourceString)
+				}
+			}
+		}
+		// Creation succeeded; remove the manifest from the list to avoid creating it a second time.
+		delete(manifests, path)
+	}
+
+	return formatErrors("failed to create some manifests", errs), reloadDiscovery
+}
+
+func formatErrors(prefix string, errors map[string]error) error {
+	if len(errors) == 0 {
+		return nil
+	}
+	aggregatedErrMessages := []string{}
+	keys := []string{}
+	for key := range errors {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		aggregatedErrMessages = append(aggregatedErrMessages, fmt.Sprintf("%q: %v", k, errors[k]))
+	}
+	return fmt.Errorf("%s:\n%s", prefix, strings.Join(aggregatedErrMessages, "\n"))
+}
+
+func load(assetsDir string, options CreateOptions) (map[string]*unstructured.Unstructured, error) {
+	manifests := map[string]*unstructured.Unstructured{}
+	manifestsBytesMap, err := assets.LoadFilesRecursively(assetsDir, options.Filters...)
+ if err != nil { + return nil, err + } + + errs := map[string]error{} + for manifestPath, manifestBytes := range manifestsBytesMap { + manifestJSON, err := yaml.YAMLToJSON(manifestBytes) + if err != nil { + errs[manifestPath] = fmt.Errorf("unable to convert asset %q from YAML to JSON: %v", manifestPath, err) + continue + } + manifestObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, manifestJSON) + if err != nil { + errs[manifestPath] = fmt.Errorf("unable to decode asset %q: %v", manifestPath, err) + continue + } + manifestUnstructured, ok := manifestObj.(*unstructured.Unstructured) + if !ok { + errs[manifestPath] = fmt.Errorf("unable to convert asset %q to unstructured", manifestPath) + continue + } + manifests[manifestPath] = manifestUnstructured + } + + return manifests, formatErrors("failed to load some manifests", errs) +} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml new file mode 100644 index 00000000000..bea8b9a596a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/0000_10_kube-apiserver-operator_01_config.crd.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: kubeapiserveroperatorconfigs.kubeapiserver.operator.openshift.io +spec: + scope: Cluster + group: kubeapiserver.operator.openshift.io + version: v1alpha1 + names: + kind: KubeAPIServerOperatorConfig + plural: kubeapiserveroperatorconfigs + singular: kubeapiserveroperatorconfig + categories: + - coreoperators + subresources: + status: {} diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml new file mode 100644 index 00000000000..d208ba4ee7b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/00_openshift-kube-apiserver-ns.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-kube-apiserver + labels: + openshift.io/run-level: "0" \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml new file mode 100644 index 00000000000..c3f63e8c7d1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/configmap-aggregator-client-ca.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: aggregator-client-ca + namespace: openshift-kube-apiserver +data: + ca-bundle.crt: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml new file mode 100644 index 00000000000..81133ceaac6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml @@ -0,0 +1,7 @@ +apiVersion: kubeapiserver.operator.openshift.io/v1alpha1 +kind: KubeAPIServerOperatorConfig +metadata: + name: instance-empty-status +spec: + managementState: Managed +status: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml 
b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml new file mode 100644 index 00000000000..a946007c1ab --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml @@ -0,0 +1,8 @@ +apiVersion: kubeapiserver.operator.openshift.io/v1alpha1 +kind: KubeAPIServerOperatorConfig +metadata: + name: instance +spec: + managementState: Managed +status: + initializedValue: something diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml new file mode 100644 index 00000000000..9b8da64a0c3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/secret-aggregator-client.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: Secret +metadata: + name: aggregator-client + namespace: openshift-kube-apiserver +type: SecretTypeTLS +data: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/template.go b/vendor/github.com/openshift/library-go/pkg/assets/template.go new file mode 100644 index 00000000000..78543922039 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/template.go @@ -0,0 +1,78 @@ +package assets + +import ( + "bytes" + "encoding/base64" + "strings" + "text/template" + "time" + + "k8s.io/client-go/util/cert" +) + +var templateFuncs = map[string]interface{}{ + "notAfter": notAfter, + "notBefore": notBefore, + "issuer": issuer, + "base64": base64encode, + "indent": indent, + "load": load, +} + +func indent(indention int, v []byte) string { + newline := "\n" + strings.Repeat(" ", indention) + return strings.Replace(string(v), "\n", newline, -1) +} + +func base64encode(v []byte) string { + return base64.StdEncoding.EncodeToString(v) +} + +func notAfter(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].NotAfter.Format(time.RFC3339) +} + +func notBefore(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].NotBefore.Format(time.RFC3339) +} + +func issuer(certBytes []byte) string { + if len(certBytes) == 0 { + return "" + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + panic(err) + } + return certs[0].Issuer.CommonName +} + +func load(n string, assets map[string][]byte) []byte { + return assets[n] +} + +func renderFile(name string, tb []byte, data interface{}) ([]byte, error) { + tmpl, err := template.New(name).Funcs(templateFuncs).Parse(string(tb)) + if err != nil { + return nil, err + } + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go b/vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go new file mode 100644 index 00000000000..a62c3a3ad01 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go @@ -0,0 +1,153 @@ +package bootstrapauthenticator + +import ( + "context" + "crypto/sha512" + "encoding/base64" + "fmt" + "time" + + "golang.org/x/crypto/bcrypt" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + 
"k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/kubernetes/typed/core/v1" +) + +const ( + // BootstrapUser is the magic bootstrap OAuth user that can perform any action + BootstrapUser = "kube:admin" + // support basic auth which does not allow : in username + bootstrapUserBasicAuth = "kubeadmin" + // force the use of a secure password length + // expected format is 5char-5char-5char-5char + minPasswordLen = 23 +) + +var ( + // make it obvious that we refuse to honor short passwords + errPasswordTooShort = fmt.Errorf("%s password must be at least %d characters long", bootstrapUserBasicAuth, minPasswordLen) + + // we refuse to honor a secret that is too new when compared to kube-system + // since kube-system always exists and cannot be deleted + // and creation timestamp is controlled by the api, we can use this to + // detect if the secret was recreated after the initial bootstrapping + errSecretRecreated = fmt.Errorf("%s secret cannot be recreated", bootstrapUserBasicAuth) +) + +func New(getter BootstrapUserDataGetter) authenticator.Password { + return &bootstrapPassword{ + getter: getter, + names: sets.NewString(BootstrapUser, bootstrapUserBasicAuth), + } +} + +type bootstrapPassword struct { + getter BootstrapUserDataGetter + names sets.String +} + +func (b *bootstrapPassword) AuthenticatePassword(ctx context.Context, username, password string) (*authenticator.Response, bool, error) { + if !b.names.Has(username) { + return nil, false, nil + } + + data, ok, err := b.getter.Get() + if err != nil || !ok { + return nil, ok, err + } + + // check length after we know that the secret is functional since + // we do not want to complain when the bootstrap user is disabled + if len(password) < minPasswordLen { + return nil, false, errPasswordTooShort + } + + if err := bcrypt.CompareHashAndPassword(data.PasswordHash, []byte(password)); err != nil { + if err == bcrypt.ErrMismatchedHashAndPassword { + klog.V(4).Infof("%s password mismatch", bootstrapUserBasicAuth) + return nil, false, nil + } + return nil, false, err + } + + // do not set other fields, see identitymapper.userToInfo func + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: BootstrapUser, + UID: data.UID, // uid ties this authentication to the current state of the secret + }, + }, true, nil +} + +type BootstrapUserData struct { + PasswordHash []byte + UID string +} + +type BootstrapUserDataGetter interface { + Get() (data *BootstrapUserData, ok bool, err error) + // TODO add a method like: + // IsPermanentlyDisabled() bool + // and use it to gate the wiring of components related to the bootstrap user. + // when the oauth server is running embedded in the kube api server, this method would always + // return false because the control plane would not be functional at the time of the check. + // when running as an external process, we can assume a functional control plane to perform the check. 
+} + +func NewBootstrapUserDataGetter(secrets v1.SecretsGetter, namespaces v1.NamespacesGetter) BootstrapUserDataGetter { + return &bootstrapUserDataGetter{ + secrets: secrets.Secrets(metav1.NamespaceSystem), + namespaces: namespaces.Namespaces(), + } +} + +type bootstrapUserDataGetter struct { + secrets v1.SecretInterface + namespaces v1.NamespaceInterface +} + +func (b *bootstrapUserDataGetter) Get() (*BootstrapUserData, bool, error) { + secret, err := b.secrets.Get(bootstrapUserBasicAuth, metav1.GetOptions{}) + if errors.IsNotFound(err) { + klog.V(4).Infof("%s secret does not exist", bootstrapUserBasicAuth) + return nil, false, nil + } + if err != nil { + return nil, false, err + } + if secret.DeletionTimestamp != nil { + klog.V(4).Infof("%s secret is being deleted", bootstrapUserBasicAuth) + return nil, false, nil + } + namespace, err := b.namespaces.Get(metav1.NamespaceSystem, metav1.GetOptions{}) + if err != nil { + return nil, false, err + } + if secret.CreationTimestamp.After(namespace.CreationTimestamp.Add(time.Hour)) { + return nil, false, errSecretRecreated + } + + hashedPassword := secret.Data[bootstrapUserBasicAuth] + + // make sure the value is a valid bcrypt hash + if _, err := bcrypt.Cost(hashedPassword); err != nil { + return nil, false, err + } + + exactSecret := string(secret.UID) + secret.ResourceVersion + both := append([]byte(exactSecret), hashedPassword...) + + // use a hash to avoid leaking any derivative of the password + // this makes it easy for us to tell if the secret changed + uidBytes := sha512.Sum512(both) + + return &BootstrapUserData{ + PasswordHash: hashedPassword, + UID: base64.RawURLEncoding.EncodeToString(uidBytes[:]), + }, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go new file mode 100644 index 00000000000..74c179e6865 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go @@ -0,0 +1,56 @@ +package authorizationutil + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" +) + +func BuildRBACSubjects(users, groups []string) []rbacv1.Subject { + subjects := []rbacv1.Subject{} + + for _, user := range users { + saNamespace, saName, err := serviceaccount.SplitUsername(user) + if err == nil { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: saNamespace, Name: saName}) + } else { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: user}) + } + } + + for _, group := range groups { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: group}) + } + + return subjects +} + +func RBACSubjectsToUsersAndGroups(subjects []rbacv1.Subject, defaultNamespace string) (users []string, groups []string) { + for _, subject := range subjects { + + switch { + case subject.APIGroup == rbacv1.GroupName && subject.Kind == rbacv1.GroupKind: + groups = append(groups, subject.Name) + case subject.APIGroup == rbacv1.GroupName && subject.Kind == rbacv1.UserKind: + users = append(users, subject.Name) + case subject.APIGroup == "" && subject.Kind == rbacv1.ServiceAccountKind: + // default the namespace to namespace we're working in if + // it's available. This allows rolebindings that reference + // SAs in the local namespace to avoid having to qualify + // them. 
+ ns := defaultNamespace + if len(subject.Namespace) > 0 { + ns = subject.Namespace + } + if len(ns) > 0 { + name := serviceaccount.MakeUsername(ns, subject.Name) + users = append(users, name) + } else { + // maybe error? this fails safe at any rate + } + default: + // maybe error? This fails safe at any rate + } + } + + return users, groups +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go new file mode 100644 index 00000000000..0953c41e0e8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go @@ -0,0 +1,48 @@ +package authorizationutil + +import ( + "errors" + + authorizationv1 "k8s.io/api/authorization/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/user" + authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" +) + +// AddUserToSAR adds the requisite user information to a SubjectAccessReview. +// It returns the modified SubjectAccessReview. +func AddUserToSAR(user user.Info, sar *authorizationv1.SubjectAccessReview) *authorizationv1.SubjectAccessReview { + sar.Spec.User = user.GetName() + // reminiscent of the bad old days of C. Copies copy the min number of elements of both source and dest + sar.Spec.Groups = make([]string, len(user.GetGroups())) + copy(sar.Spec.Groups, user.GetGroups()) + sar.Spec.Extra = map[string]authorizationv1.ExtraValue{} + + for k, v := range user.GetExtra() { + sar.Spec.Extra[k] = authorizationv1.ExtraValue(v) + } + + return sar +} + +// Authorize verifies that a given user is permitted to carry out a given +// action. If this cannot be determined, or if the user is not permitted, an +// error is returned. 
+func Authorize(sarClient authorizationclient.SubjectAccessReviewInterface, user user.Info, resourceAttributes *authorizationv1.ResourceAttributes) error {
+ sar := AddUserToSAR(user, &authorizationv1.SubjectAccessReview{
+ Spec: authorizationv1.SubjectAccessReviewSpec{
+ ResourceAttributes: resourceAttributes,
+ },
+ })
+
+ resp, err := sarClient.Create(sar)
+ if err == nil && resp != nil && resp.Status.Allowed {
+ return nil
+ }
+
+ if err == nil {
+ err = errors.New(resp.Status.Reason)
+ }
+ return kerrors.NewForbidden(schema.GroupResource{Group: resourceAttributes.Group, Resource: resourceAttributes.Resource}, resourceAttributes.Name, err)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go
new file mode 100644
index 00000000000..e9b7518f3dd
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go
@@ -0,0 +1,86 @@
+package scopemetadata
+
+import (
+ "fmt"
+ "strings"
+)
+
+// role:<role name>:<namespace> ("*" means all namespaces; a trailing ":!" requests escalating access)
+type ClusterRoleEvaluator struct{}
+
+var clusterRoleEvaluatorInstance = ClusterRoleEvaluator{}
+
+func (ClusterRoleEvaluator) Handles(scope string) bool {
+ return ClusterRoleEvaluatorHandles(scope)
+}
+
+func (e ClusterRoleEvaluator) Validate(scope string) error {
+ _, _, _, err := ClusterRoleEvaluatorParseScope(scope)
+ return err
+}
+
+func (e ClusterRoleEvaluator) Describe(scope string) (string, string, error) {
+ roleName, scopeNamespace, escalating, err := ClusterRoleEvaluatorParseScope(scope)
+ if err != nil {
+ return "", "", err
+ }
+
+ // Anything you can do [in project "foo" | server-wide] that is also allowed by the "admin" role[, except access escalating resources like secrets]
+
+ scopePhrase := ""
+ if scopeNamespace == scopesAllNamespaces {
+ scopePhrase = "server-wide"
+ } else {
+ scopePhrase = fmt.Sprintf("in project %q", scopeNamespace)
+ }
+
+ warning := ""
+ escalatingPhrase := ""
+ if escalating {
+ warning = "Includes access to escalating resources like secrets"
+ } else {
+ escalatingPhrase = ", except access escalating resources like secrets"
+ }
+
+ description := fmt.Sprintf("Anything you can do %s that is also allowed by the %q role%s", scopePhrase, roleName, escalatingPhrase)
+
+ return description, warning, nil
+}
+
+func ClusterRoleEvaluatorHandles(scope string) bool {
+ return strings.HasPrefix(scope, clusterRoleIndicator)
+}
+
+// ClusterRoleEvaluatorParseScope parses the requested scope, determining the requested role name, namespace, and if
+// access to escalating objects is required.
+// It will return an error if it doesn't parse cleanly.
+func ClusterRoleEvaluatorParseScope(scope string) (string /*role name*/, string /*namespace*/, bool /*escalating*/, error) {
+ if !ClusterRoleEvaluatorHandles(scope) {
+ return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+ }
+ return parseClusterRoleScope(scope)
+}
+
+func parseClusterRoleScope(scope string) (string /*role name*/, string /*namespace*/, bool /*escalating*/, error) {
+ if !strings.HasPrefix(scope, clusterRoleIndicator) {
+ return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+ }
+ escalating := false
+ if strings.HasSuffix(scope, ":!") {
+ escalating = true
+ // clip that last segment before parsing the rest
+ scope = scope[:strings.LastIndex(scope, ":")]
+ }
+
+ tokens := strings.SplitN(scope, ":", 2)
+ if len(tokens) != 2 {
+ return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+ }
+
+ // namespaces can't have colons, but roles can. pick last.
+ lastColonIndex := strings.LastIndex(tokens[1], ":")
+ if lastColonIndex <= 0 || lastColonIndex == (len(tokens[1])-1) {
+ return "", "", false, fmt.Errorf("bad format for scope %v", scope)
+ }
+
+ return tokens[1][0:lastColonIndex], tokens[1][lastColonIndex+1:], escalating, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go
new file mode 100644
index 00000000000..65280256c8a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go
@@ -0,0 +1,17 @@
+package scopemetadata
+
+// ScopeDescriber takes a scope and returns metadata about it
+type ScopeDescriber interface {
+ // Handles returns true if this evaluator can evaluate this scope
+ Handles(scope string) bool
+ // Validate returns an error if the scope is malformed
+ Validate(scope string) error
+ // Describe returns a description, warning (typically used to warn about escalation dangers), or an error if the scope is malformed
+ Describe(scope string) (description string, warning string, err error)
+}
+
+// ScopeDescribers lists the evaluators that handle the supported scope prefixes
+var ScopeDescribers = []ScopeDescriber{
+ UserEvaluator{},
+ ClusterRoleEvaluator{},
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go
new file mode 100644
index 00000000000..586a7d787ae
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go
@@ -0,0 +1,68 @@
+package scopemetadata
+
+import (
+ "fmt"
+)
+
+// these must agree with the scope authorizer, but it's an API we cannot realistically change
+const (
+ scopesAllNamespaces = "*"
+
+ userIndicator = "user:"
+ clusterRoleIndicator = "role:"
+
+ UserInfo = userIndicator + "info"
+ UserAccessCheck = userIndicator + "check-access"
+
+ // UserListScopedProjects gives explicit permission to see the projects that this token can see.
+ UserListScopedProjects = userIndicator + "list-scoped-projects"
+
+ // UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems
+ // unrelated to openshift and to display projects for selection in a secondary UI.
+ UserListAllProjects = userIndicator + "list-projects"
+
+ // userFull includes all permissions of the user
+ userFull = userIndicator + "full"
+)
+
+// user:<scope name>
+type UserEvaluator struct{}
+
+func (UserEvaluator) Handles(scope string) bool {
+ return UserEvaluatorHandles(scope)
+}
+
+func (e UserEvaluator) Validate(scope string) error {
+ if e.Handles(scope) {
+ return nil
+ }
+
+ return fmt.Errorf("unrecognized scope: %v", scope)
+}
+
+var defaultSupportedScopesMap = map[string]string{
+ UserInfo: "Read-only access to your user information (including username, identities, and group membership)",
+ UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`,
+ UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`,
+ UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`,
+ userFull: `Full read/write access with all of your permissions`,
+}
+
+func (UserEvaluator) Describe(scope string) (string, string, error) {
+ switch scope {
+ case UserInfo, UserAccessCheck, UserListScopedProjects, UserListAllProjects:
+ return defaultSupportedScopesMap[scope], "", nil
+ case userFull:
+ return defaultSupportedScopesMap[scope], `Includes any access you have to escalating resources like secrets`, nil
+ default:
+ return "", "", fmt.Errorf("unrecognized scope: %v", scope)
+ }
+}
+
+func UserEvaluatorHandles(scope string) bool {
+ switch scope {
+ case userFull, UserInfo, UserAccessCheck, UserListScopedProjects, UserListAllProjects:
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go
new file mode 100644
index 00000000000..59a7009b9ef
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go
@@ -0,0 +1,152 @@
+package scopemetadata
+
+import (
+ "fmt"
+
+ kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ oauthv1 "github.com/openshift/api/oauth/v1"
+)
+
+func ValidateScopes(scopes []string, fldPath *field.Path) field.ErrorList {
+ allErrs := field.ErrorList{}
+
+ if len(scopes) == 0 {
+ allErrs = append(allErrs, field.Required(fldPath, "may not be empty"))
+ }
+
+ for i, scope := range scopes {
+ illegalCharacter := false
+ // https://tools.ietf.org/html/rfc6749#section-3.3 (full list of allowed chars is %x21 / %x23-5B / %x5D-7E)
+ // for those without an ascii table, that's `!`, `#-[`, `]-~` inclusive.
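+ // e.g. "user:info" and "role:admin:*" fall entirely inside those ranges,
+ // while a scope containing a space or a double quote does not.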
+ for _, ch := range scope { + switch { + case ch == '!': + case ch >= '#' && ch <= '[': + case ch >= ']' && ch <= '~': + default: + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, fmt.Sprintf("%v not allowed", ch))) + illegalCharacter = true + } + } + if illegalCharacter { + continue + } + + found := false + for _, evaluator := range ScopeDescribers { + if !evaluator.Handles(scope) { + continue + } + + found = true + if err := evaluator.Validate(scope); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, err.Error())) + break + } + } + + if !found { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, "no scope handler found")) + } + } + + return allErrs +} + +func ValidateScopeRestrictions(client *oauthv1.OAuthClient, scopes ...string) error { + if len(scopes) == 0 { + return fmt.Errorf("%s may not request unscoped tokens", client.Name) + } + + if len(client.ScopeRestrictions) == 0 { + return nil + } + + errs := []error{} + for _, scope := range scopes { + if err := validateScopeRestrictions(client, scope); err != nil { + errs = append(errs, err) + } + } + + return kutilerrors.NewAggregate(errs) +} + +func validateScopeRestrictions(client *oauthv1.OAuthClient, scope string) error { + errs := []error{} + + for _, restriction := range client.ScopeRestrictions { + if len(restriction.ExactValues) > 0 { + if err := validateLiteralScopeRestrictions(scope, restriction.ExactValues); err != nil { + errs = append(errs, err) + continue + } + return nil + } + + if restriction.ClusterRole != nil { + if !ClusterRoleEvaluatorHandles(scope) { + continue + } + if err := validateClusterRoleScopeRestrictions(scope, *restriction.ClusterRole); err != nil { + errs = append(errs, err) + continue + } + return nil + } + } + + // if we got here, then nothing matched. If we already have errors, do nothing, otherwise add one to make it report failed. 
+ if len(errs) == 0 { + errs = append(errs, fmt.Errorf("%v did not match any scope restriction", scope)) + } + + return kutilerrors.NewAggregate(errs) +} + +func validateLiteralScopeRestrictions(scope string, literals []string) error { + for _, literal := range literals { + if literal == scope { + return nil + } + } + + return fmt.Errorf("%v not found in %v", scope, literals) +} + +func validateClusterRoleScopeRestrictions(scope string, restriction oauthv1.ClusterRoleScopeRestriction) error { + role, namespace, escalating, err := ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return err + } + + foundName := false + for _, restrictedRoleName := range restriction.RoleNames { + if restrictedRoleName == "*" || restrictedRoleName == role { + foundName = true + break + } + } + if !foundName { + return fmt.Errorf("%v does not use an approved name", scope) + } + + foundNamespace := false + for _, restrictedNamespace := range restriction.Namespaces { + if restrictedNamespace == "*" || restrictedNamespace == namespace { + foundNamespace = true + break + } + } + if !foundNamespace { + return fmt.Errorf("%v does not use an approved namespace", scope) + } + + if escalating && !restriction.AllowEscalation { + return fmt.Errorf("%v is not allowed to escalate", scope) + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go new file mode 100644 index 00000000000..ddb3ff77485 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go @@ -0,0 +1,141 @@ +package scopemetadata + +import ( + "strings" + "testing" + + oauthv1 "github.com/openshift/api/oauth/v1" +) + +func TestValidateScopeRestrictions(t *testing.T) { + testCases := []struct { + name string + scopes []string + client *oauthv1.OAuthClient + + expectedErrors []string + }{ + { + name: "unrestricted allows any", + scopes: []string{"one"}, + client: &oauthv1.OAuthClient{}, + }, + { + name: "unrestricted allows empty", + scopes: []string{""}, + client: &oauthv1.OAuthClient{}, + }, + { + name: "missing scopes check precedes unrestricted", + scopes: []string{}, + client: &oauthv1.OAuthClient{}, + expectedErrors: []string{"may not request unscoped tokens"}, + }, + { + name: "simple literal", + scopes: []string{"one"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ExactValues: []string{"two", "one"}}}, + }, + }, + { + name: "simple must match", + scopes: []string{"missing"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ExactValues: []string{"two", "one"}}}, + }, + expectedErrors: []string{`missing not found in [two one]`}, + }, + { + name: "cluster role name must match", + scopes: []string{clusterRoleIndicator + "three:alfa"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + expectedErrors: []string{`role:three:alfa does not use an approved name`}, + }, + { + name: "cluster role namespace must match", + scopes: []string{clusterRoleIndicator + "two:charlie"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + 
AllowEscalation: false, + }}}, + }, + expectedErrors: []string{`role:two:charlie does not use an approved namespace`}, + }, + { + name: "cluster role escalation must match", + scopes: []string{clusterRoleIndicator + "two:bravo:!"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + expectedErrors: []string{`role:two:bravo:! is not allowed to escalate`}, + }, + { + name: "cluster role matches", + scopes: []string{clusterRoleIndicator + "two:bravo:!"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: true, + }}}, + }, + }, + { + name: "cluster role matches 2", + scopes: []string{clusterRoleIndicator + "two:bravo"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + }, + { + name: "cluster role star matches", + scopes: []string{clusterRoleIndicator + "two:bravo"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two", "*"}, + Namespaces: []string{"alfa", "bravo", "*"}, + AllowEscalation: true, + }}}, + }, + }, + } + + for _, tc := range testCases { + err := ValidateScopeRestrictions(tc.client, tc.scopes...) + if err != nil && len(tc.expectedErrors) == 0 { + t.Errorf("%s: unexpected error: %v", tc.name, err) + continue + } + if err == nil && len(tc.expectedErrors) > 0 { + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErrors) + continue + } + if err == nil && len(tc.expectedErrors) == 0 { + continue + } + + for _, expectedErr := range tc.expectedErrors { + if !strings.Contains(err.Error(), expectedErr) { + t.Errorf("%s: error %v missing %v", tc.name, err, expectedErr) + } + } + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go new file mode 100644 index 00000000000..0ebc3d9a9d8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go @@ -0,0 +1,85 @@ +package buildutil + +import ( + corev1 "k8s.io/api/core/v1" + + buildv1 "github.com/openshift/api/build/v1" +) + +// GetInputReference returns the From ObjectReference associated with the +// BuildStrategy. 
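+// For example, a strategy with only SourceStrategy set yields
+// &strategy.SourceStrategy.From; a strategy with no strategy type set yields nil.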
+func GetInputReference(strategy buildv1.BuildStrategy) *corev1.ObjectReference { + switch { + case strategy.SourceStrategy != nil: + return &strategy.SourceStrategy.From + case strategy.DockerStrategy != nil: + return strategy.DockerStrategy.From + case strategy.CustomStrategy != nil: + return &strategy.CustomStrategy.From + default: + return nil + } +} + +// GetBuildEnv gets the build strategy environment +func GetBuildEnv(build *buildv1.Build) []corev1.EnvVar { + switch { + case build.Spec.Strategy.SourceStrategy != nil: + return build.Spec.Strategy.SourceStrategy.Env + case build.Spec.Strategy.DockerStrategy != nil: + return build.Spec.Strategy.DockerStrategy.Env + case build.Spec.Strategy.CustomStrategy != nil: + return build.Spec.Strategy.CustomStrategy.Env + case build.Spec.Strategy.JenkinsPipelineStrategy != nil: + return build.Spec.Strategy.JenkinsPipelineStrategy.Env + default: + return nil + } +} + +// SetBuildEnv replaces the current build environment +func SetBuildEnv(build *buildv1.Build, env []corev1.EnvVar) { + var oldEnv *[]corev1.EnvVar + + switch { + case build.Spec.Strategy.SourceStrategy != nil: + oldEnv = &build.Spec.Strategy.SourceStrategy.Env + case build.Spec.Strategy.DockerStrategy != nil: + oldEnv = &build.Spec.Strategy.DockerStrategy.Env + case build.Spec.Strategy.CustomStrategy != nil: + oldEnv = &build.Spec.Strategy.CustomStrategy.Env + case build.Spec.Strategy.JenkinsPipelineStrategy != nil: + oldEnv = &build.Spec.Strategy.JenkinsPipelineStrategy.Env + default: + return + } + *oldEnv = env +} + +// FindTriggerPolicy retrieves the BuildTrigger(s) of a given type from a build configuration. +// Returns nil if no matches are found. +func FindTriggerPolicy(triggerType buildv1.BuildTriggerType, config *buildv1.BuildConfig) (buildTriggers []buildv1.BuildTriggerPolicy) { + for _, specTrigger := range config.Spec.Triggers { + if specTrigger.Type == triggerType { + buildTriggers = append(buildTriggers, specTrigger) + } + } + return buildTriggers +} + +// ConfigNameForBuild returns the name of the build config from a +// build name. 
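+// The lookup order below is: BuildConfigAnnotation, then BuildConfigLabel,
+// then BuildConfigLabelDeprecated; a nil build yields the empty string.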
+func ConfigNameForBuild(build *buildv1.Build) string { + if build == nil { + return "" + } + if build.Annotations != nil { + if _, exists := build.Annotations[buildv1.BuildConfigAnnotation]; exists { + return build.Annotations[buildv1.BuildConfigAnnotation] + } + } + if _, exists := build.Labels[buildv1.BuildConfigLabel]; exists { + return build.Labels[buildv1.BuildConfigLabel] + } + return build.Labels[buildv1.BuildConfigLabelDeprecated] +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go new file mode 100644 index 00000000000..33134c91912 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go @@ -0,0 +1 @@ +package buildutil diff --git a/vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go b/vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go new file mode 100644 index 00000000000..07663c34d16 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go @@ -0,0 +1,115 @@ +package envresolve + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" +) + +// ResourceStore defines a new resource store data structure +type ResourceStore struct { + SecretStore map[string]*corev1.Secret + ConfigMapStore map[string]*corev1.ConfigMap +} + +// NewResourceStore returns a pointer to a new resource store data structure +func NewResourceStore() *ResourceStore { + return &ResourceStore{ + SecretStore: make(map[string]*corev1.Secret), + ConfigMapStore: make(map[string]*corev1.ConfigMap), + } +} + +// getSecretRefValue returns the value of a secret in the supplied namespace +func getSecretRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, secretSelector *corev1.SecretKeySelector) (string, error) { + secret, ok := store.SecretStore[secretSelector.Name] + if !ok { + var err error + secret, err = client.CoreV1().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + store.SecretStore[secretSelector.Name] = secret + } + if data, ok := secret.Data[secretSelector.Key]; ok { + return string(data), nil + } + return "", fmt.Errorf("key %s not found in secret %s", secretSelector.Key, secretSelector.Name) + +} + +// getConfigMapRefValue returns the value of a configmap in the supplied namespace +func getConfigMapRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, configMapSelector *corev1.ConfigMapKeySelector) (string, error) { + configMap, ok := store.ConfigMapStore[configMapSelector.Name] + if !ok { + var err error + configMap, err = client.CoreV1().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + store.ConfigMapStore[configMapSelector.Name] = configMap + } + if data, ok := configMap.Data[configMapSelector.Key]; ok { + return string(data), nil + } + return "", fmt.Errorf("key %s not found in config map %s", configMapSelector.Key, configMapSelector.Name) +} + +// getFieldRef returns the value of the supplied path in the given object +func getFieldRef(obj runtime.Object, from *corev1.EnvVarSource) (string, error) { + return ExtractFieldPathAsString(obj, from.FieldRef.FieldPath) +} + +// getResourceFieldRef returns the value of a resource in the given container +func getResourceFieldRef(from *corev1.EnvVarSource, c 
*corev1.Container) (string, error) {
+ return ExtractContainerResourceValue(from.ResourceFieldRef, c)
+}
+
+// GetEnvVarRefValue returns the value referenced by the supplied EnvVarSource given the other supplied information
+func GetEnvVarRefValue(kc kubernetes.Interface, ns string, store *ResourceStore, from *corev1.EnvVarSource, obj runtime.Object, c *corev1.Container) (string, error) {
+ if from.SecretKeyRef != nil {
+ return getSecretRefValue(kc, ns, store, from.SecretKeyRef)
+ }
+
+ if from.ConfigMapKeyRef != nil {
+ return getConfigMapRefValue(kc, ns, store, from.ConfigMapKeyRef)
+ }
+
+ if from.FieldRef != nil {
+ return getFieldRef(obj, from)
+ }
+
+ if from.ResourceFieldRef != nil {
+ return getResourceFieldRef(from, c)
+ }
+
+ return "", fmt.Errorf("invalid valueFrom")
+}
+
+// GetEnvVarRefString returns a text description of the supplied EnvVarSource
+func GetEnvVarRefString(from *corev1.EnvVarSource) string {
+ if from.ConfigMapKeyRef != nil {
+ return fmt.Sprintf("configmap %s, key %s", from.ConfigMapKeyRef.Name, from.ConfigMapKeyRef.Key)
+ }
+
+ if from.SecretKeyRef != nil {
+ return fmt.Sprintf("secret %s, key %s", from.SecretKeyRef.Name, from.SecretKeyRef.Key)
+ }
+
+ if from.FieldRef != nil {
+ return fmt.Sprintf("field path %s", from.FieldRef.FieldPath)
+ }
+
+ if from.ResourceFieldRef != nil {
+ containerPrefix := ""
+ if from.ResourceFieldRef.ContainerName != "" {
+ containerPrefix = fmt.Sprintf("%s/", from.ResourceFieldRef.ContainerName)
+ }
+ return fmt.Sprintf("resource field %s%s", containerPrefix, from.ResourceFieldRef.Resource)
+ }
+
+ return "invalid valueFrom"
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go b/vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go
new file mode 100644
index 00000000000..1d01fa42457
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go
@@ -0,0 +1,150 @@
+package envresolve
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/validation"
+)
+
+// these are all from fieldpath.go
+
+// FormatMap formats map[string]string to a string.
+func FormatMap(m map[string]string) (fmtStr string) {
+ // output with keys in sorted order to provide stable output
+ keys := sets.NewString()
+ for key := range m {
+ keys.Insert(key)
+ }
+ for _, key := range keys.List() {
+ fmtStr += fmt.Sprintf("%v=%q\n", key, m[key])
+ }
+ fmtStr = strings.TrimSuffix(fmtStr, "\n")
+
+ return
+}
+
+// ExtractFieldPathAsString extracts the field from the given object
+// and returns it as a string. The object must be a pointer to an
+// API type.
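+// For example (hypothetical object), "metadata.name" returns the object's name
+// and "metadata.labels['app']" returns the value of its "app" label.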
+func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). +// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} + +// these are from api/v1/helpers.go + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "limits.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + case "requests.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) +} + +// convertResourceCPUToString converts cpu value to the format of 
divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns +// ceiling of the value. +func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/naming/namer.go b/vendor/github.com/openshift/library-go/pkg/build/naming/namer.go new file mode 100644 index 00000000000..1e06745b350 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/naming/namer.go @@ -0,0 +1,73 @@ +package naming + +import ( + "fmt" + "hash/fnv" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" +) + +// GetName returns a name given a base ("deployment-5") and a suffix ("deploy") +// It will first attempt to join them with a dash. If the resulting name is longer +// than maxLength: if the suffix is too long, it will truncate the base name and add +// an 8-character hash of the [base]-[suffix] string. If the suffix is not too long, +// it will truncate the base, add the hash of the base and return [base]-[hash]-[suffix] +func GetName(base, suffix string, maxLength int) string { + if maxLength <= 0 { + return "" + } + name := fmt.Sprintf("%s-%s", base, suffix) + if len(name) <= maxLength { + return name + } + + baseLength := maxLength - 10 /*length of -hash-*/ - len(suffix) + + // if the suffix is too long, ignore it + if baseLength < 0 { + prefix := base[0:min(len(base), max(0, maxLength-9))] + // Calculate hash on initial base-suffix string + shortName := fmt.Sprintf("%s-%s", prefix, hash(name)) + return shortName[:min(maxLength, len(shortName))] + } + + prefix := base[0:baseLength] + // Calculate hash on initial base-suffix string + return fmt.Sprintf("%s-%s-%s", prefix, hash(base), suffix) +} + +// GetPodName calls GetName with the length restriction for pods +func GetPodName(base, suffix string) string { + return GetName(base, suffix, kvalidation.DNS1123SubdomainMaxLength) +} + +// GetConfigMapName calls GetName with the length restriction for ConfigMaps +func GetConfigMapName(base, suffix string) string { + return GetName(base, suffix, kvalidation.DNS1123SubdomainMaxLength) +} + +// max returns the greater of its 2 inputs +func max(a, b int) int { + if b > a { + return b + } + return a +} + +// min returns the lesser of its 2 inputs +func min(a, b int) int { + if b < a { + return b + } + return a +} + +// hash calculates the hexadecimal representation (8-chars) +// of the hash of the passed in string using the FNV-a algorithm +func hash(s string) string { + hash := fnv.New32a() + hash.Write([]byte(s)) + intHash := hash.Sum32() + result := fmt.Sprintf("%08x", intHash) + return result +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go 
b/vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go new file mode 100644 index 00000000000..33db1a80122 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go @@ -0,0 +1,101 @@ +package naming + +import ( + "math/rand" + "testing" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" +) + +func TestGetName(t *testing.T) { + for i := 0; i < 10; i++ { + shortName := randSeq(rand.Intn(kvalidation.DNS1123SubdomainMaxLength-1) + 1) + longName := randSeq(kvalidation.DNS1123SubdomainMaxLength + rand.Intn(100)) + + tests := []struct { + base, suffix, expected string + }{ + { + base: shortName, + suffix: "deploy", + expected: shortName + "-deploy", + }, + { + base: longName, + suffix: "deploy", + expected: longName[:kvalidation.DNS1123SubdomainMaxLength-16] + "-" + hash(longName) + "-deploy", + }, + { + base: shortName, + suffix: longName, + expected: shortName + "-" + hash(shortName+"-"+longName), + }, + { + base: "", + suffix: shortName, + expected: "-" + shortName, + }, + { + base: "", + suffix: longName, + expected: "-" + hash("-"+longName), + }, + { + base: shortName, + suffix: "", + expected: shortName + "-", + }, + { + base: longName, + suffix: "", + expected: longName[:kvalidation.DNS1123SubdomainMaxLength-10] + "-" + hash(longName) + "-", + }, + } + + for _, test := range tests { + result := GetName(test.base, test.suffix, kvalidation.DNS1123SubdomainMaxLength) + if result != test.expected { + t.Errorf("Got unexpected result. Expected: %s Got: %s", test.expected, result) + } + } + } +} + +func TestGetNameIsDifferent(t *testing.T) { + shortName := randSeq(32) + deployerName := GetName(shortName, "deploy", kvalidation.DNS1123SubdomainMaxLength) + builderName := GetName(shortName, "build", kvalidation.DNS1123SubdomainMaxLength) + if deployerName == builderName { + t.Errorf("Expecting names to be different: %s\n", deployerName) + } + longName := randSeq(kvalidation.DNS1123SubdomainMaxLength + 10) + deployerName = GetName(longName, "deploy", kvalidation.DNS1123SubdomainMaxLength) + builderName = GetName(longName, "build", kvalidation.DNS1123SubdomainMaxLength) + if deployerName == builderName { + t.Errorf("Expecting names to be different: %s\n", deployerName) + } +} + +func TestGetNameReturnShortNames(t *testing.T) { + base := randSeq(32) + for maxLength := 0; maxLength < len(base)+2; maxLength++ { + for suffixLen := 0; suffixLen <= maxLength+1; suffixLen++ { + suffix := randSeq(suffixLen) + got := GetName(base, suffix, maxLength) + if len(got) > maxLength { + t.Fatalf("len(GetName(%[1]q, %[2]q, %[3]d)) = len(%[4]q) = %[5]d; want %[3]d", base, suffix, maxLength, got, len(got)) + } + } + } +} + +// From k8s.io/kubernetes/pkg/api/generator.go +var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789-") + +func randSeq(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/pem.go b/vendor/github.com/openshift/library-go/pkg/certs/pem.go new file mode 100644 index 00000000000..c3f7ff3065e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/certs/pem.go @@ -0,0 +1,57 @@ +package certs + +import ( + "bytes" + "encoding/pem" + "io/ioutil" + "os" + "path/filepath" +) + +const ( + // StringSourceEncryptedBlockType is the PEM block type used to store an encrypted string + StringSourceEncryptedBlockType = "ENCRYPTED STRING" + // StringSourceKeyBlockType is the PEM block type used to store an encrypting 
key + StringSourceKeyBlockType = "ENCRYPTING KEY" +) + +func BlockFromFile(path string, blockType string) (*pem.Block, bool, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, false, err + } + block, ok := BlockFromBytes(data, blockType) + return block, ok, nil +} + +func BlockFromBytes(data []byte, blockType string) (*pem.Block, bool) { + for { + block, remaining := pem.Decode(data) + if block == nil { + return nil, false + } + if block.Type == blockType { + return block, true + } + data = remaining + } +} + +func BlockToFile(path string, block *pem.Block, mode os.FileMode) error { + b, err := BlockToBytes(block) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(path, b, mode) +} + +func BlockToBytes(block *pem.Block) ([]byte, error) { + b := bytes.Buffer{} + if err := pem.Encode(&b, block); err != nil { + return nil, err + } + return b.Bytes(), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/util.go b/vendor/github.com/openshift/library-go/pkg/certs/util.go new file mode 100644 index 00000000000..5ec6354a50f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/certs/util.go @@ -0,0 +1,70 @@ +package certs + +import ( + "crypto/x509" + "fmt" + "strings" + "time" +) + +const defaultOutputTimeFormat = "Jan 2 15:04:05 2006" + +// nowFn is used in unit test to freeze time. +var nowFn = time.Now().UTC + +// CertificateToString converts a certificate into a human readable string. +// This function should guarantee consistent output format for must-gather tooling and any code +// that prints the certificate details. +func CertificateToString(certificate *x509.Certificate) string { + humanName := certificate.Subject.CommonName + signerHumanName := certificate.Issuer.CommonName + + if certificate.Subject.CommonName == certificate.Issuer.CommonName { + signerHumanName = "" + } + + usages := []string{} + for _, curr := range certificate.ExtKeyUsage { + if curr == x509.ExtKeyUsageClientAuth { + usages = append(usages, "client") + continue + } + if curr == x509.ExtKeyUsageServerAuth { + usages = append(usages, "serving") + continue + } + + usages = append(usages, fmt.Sprintf("%d", curr)) + } + + validServingNames := []string{} + for _, ip := range certificate.IPAddresses { + validServingNames = append(validServingNames, ip.String()) + } + for _, dnsName := range certificate.DNSNames { + validServingNames = append(validServingNames, dnsName) + } + + servingString := "" + if len(validServingNames) > 0 { + servingString = fmt.Sprintf(" validServingFor=[%s]", strings.Join(validServingNames, ",")) + } + + groupString := "" + if len(certificate.Subject.Organization) > 0 { + groupString = fmt.Sprintf(" groups=[%s]", strings.Join(certificate.Subject.Organization, ",")) + } + + return fmt.Sprintf("%q [%s]%s%s issuer=%q (%v to %v (now=%v))", humanName, strings.Join(usages, ","), groupString, + servingString, signerHumanName, certificate.NotBefore.UTC().Format(defaultOutputTimeFormat), + certificate.NotAfter.UTC().Format(defaultOutputTimeFormat), nowFn().Format(defaultOutputTimeFormat)) +} + +// CertificateBundleToString converts a certificate bundle into a human readable string. 
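+// Each certificate is rendered via CertificateToString on its own line with an
+// index prefix, e.g. (illustrative): [#0]: "leaf" [serving] issuer="signer" (...).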
+func CertificateBundleToString(bundle []*x509.Certificate) string { + output := []string{} + for i, cert := range bundle { + output = append(output, fmt.Sprintf("[#%d]: %s", i, CertificateToString(cert))) + } + return strings.Join(output, "\n") +} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/util_test.go b/vendor/github.com/openshift/library-go/pkg/certs/util_test.go new file mode 100644 index 00000000000..b97e9a1a1c4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/certs/util_test.go @@ -0,0 +1,101 @@ +package certs + +import ( + "crypto/x509" + "crypto/x509/pkix" + "net" + "testing" + "time" +) + +func init() { + nowFn = func() time.Time { + return time.Date(2019, time.January, 1, 0, 0, 0, 0, &time.Location{}) + } +} + +func TestCertificateToString(t *testing.T) { + tests := []struct { + name string + cert *x509.Certificate + expected string + }{ + { + name: "empty cert", + cert: &x509.Certificate{}, + expected: `"" [] issuer="" (Jan 1 00:00:00 0001 to Jan 1 00:00:00 0001 (now=Jan 1 00:00:00 2019))`, + }, + { + name: "common name", + cert: &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-subject", + }, + Issuer: pkix.Name{ + CommonName: "test-issuer", + }, + }, + expected: `"test-subject" [] issuer="test-issuer" (Jan 1 00:00:00 0001 to Jan 1 00:00:00 0001 (now=Jan 1 00:00:00 2019))`, + }, + { + name: "self-signed", + cert: &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-issuer", + }, + Issuer: pkix.Name{ + CommonName: "test-issuer", + }, + }, + expected: `"test-issuer" [] issuer="" (Jan 1 00:00:00 0001 to Jan 1 00:00:00 0001 (now=Jan 1 00:00:00 2019))`, + }, + { + name: "valid serving for", + cert: &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-subject", + }, + Issuer: pkix.Name{ + CommonName: "test-issuer", + }, + IPAddresses: []net.IP{net.IPv4('1', '2', '3', '4')}, + }, + expected: `"test-subject" [] validServingFor=[49.50.51.52] issuer="test-issuer" (Jan 1 00:00:00 0001 to Jan 1 00:00:00 0001 (now=Jan 1 00:00:00 2019))`, + }, + { + name: "organization", + cert: &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-subject", + Organization: []string{"foo", "bar"}, + }, + Issuer: pkix.Name{ + CommonName: "test-issuer", + }, + }, + expected: `"test-subject" [] groups=[foo,bar] issuer="test-issuer" (Jan 1 00:00:00 0001 to Jan 1 00:00:00 0001 (now=Jan 1 00:00:00 2019))`, + }, + { + name: "client auth", + cert: &x509.Certificate{ + Subject: pkix.Name{ + CommonName: "test-subject", + }, + Issuer: pkix.Name{ + CommonName: "test-issuer", + }, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, + expected: `"test-subject" [client] issuer="test-issuer" (Jan 1 00:00:00 0001 to Jan 1 00:00:00 0001 (now=Jan 1 00:00:00 2019))`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + out := CertificateToString(test.cert) + if out != test.expected { + t.Errorf("expected %q, got %q", test.expected, out) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go new file mode 100644 index 00000000000..a247311057b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/client/client_config.go @@ -0,0 +1,131 @@ +package client + +import ( + "io/ioutil" + "net" + "net/http" + "time" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" +) + +// GetKubeConfigOrInClusterConfig loads in-cluster 
+// config if kubeConfigFile is empty or the file if not,
+// then applies overrides.
+func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides *ClientConnectionOverrides) (*rest.Config, error) {
+ if len(kubeConfigFile) > 0 {
+ return GetClientConfig(kubeConfigFile, overrides)
+ }
+
+ clientConfig, err := rest.InClusterConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ applyClientConnectionOverrides(overrides, clientConfig)
+
+ t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}
+ if overrides != nil {
+ t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost
+ }
+ clientConfig.WrapTransport = t.DefaultClientTransport
+
+ return clientConfig, nil
+}
+
+// GetClientConfig returns the rest.Config for a kubeconfig file
+func GetClientConfig(kubeConfigFile string, overrides *ClientConnectionOverrides) (*rest.Config, error) {
+ kubeConfigBytes, err := ioutil.ReadFile(kubeConfigFile)
+ if err != nil {
+ return nil, err
+ }
+ kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes)
+ if err != nil {
+ return nil, err
+ }
+ clientConfig, err := kubeConfig.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ applyClientConnectionOverrides(overrides, clientConfig)
+
+ t := ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}
+ if overrides != nil {
+ t.MaxIdleConnsPerHost = overrides.MaxIdleConnsPerHost
+ }
+ clientConfig.WrapTransport = t.DefaultClientTransport
+
+ return clientConfig, nil
+}
+
+// applyClientConnectionOverrides updates a kubeConfig with the overrides from the config.
+func applyClientConnectionOverrides(overrides *ClientConnectionOverrides, kubeConfig *rest.Config) {
+ if overrides == nil {
+ return
+ }
+ if overrides.QPS > 0 {
+ kubeConfig.QPS = overrides.QPS
+ }
+ if overrides.Burst > 0 {
+ kubeConfig.Burst = int(overrides.Burst)
+ }
+ if len(overrides.AcceptContentTypes) > 0 {
+ kubeConfig.ContentConfig.AcceptContentTypes = overrides.AcceptContentTypes
+ }
+ if len(overrides.ContentType) > 0 {
+ kubeConfig.ContentConfig.ContentType = overrides.ContentType
+ }
+
+ // TODO both of these default values look wrong
+ // if we have no preferences at this point, claim that we accept both proto and json. We will get proto if the server supports it.
+ // this is a slightly niggly thing. If the server has proto and our client does not (possible, but not super likely) then this fails.
+ if len(kubeConfig.ContentConfig.AcceptContentTypes) == 0 {
+ kubeConfig.ContentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
+ }
+ if len(kubeConfig.ContentConfig.ContentType) == 0 {
+ kubeConfig.ContentConfig.ContentType = "application/vnd.kubernetes.protobuf"
+ }
+}
+
+type ClientTransportOverrides struct {
+ WrapTransport func(rt http.RoundTripper) http.RoundTripper
+ MaxIdleConnsPerHost int
+}
+
+// DefaultClientTransport sets defaults for a client Transport that are suitable for use by infrastructure components.
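+// As a sketch of the effect: for a plain *http.Transport this raises
+// MaxIdleConnsPerHost from Go's default of 2 to 100 (or the configured
+// override) and installs a 30s dial timeout; any other RoundTripper is
+// returned unmodified.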
+func (c ClientTransportOverrides) DefaultClientTransport(rt http.RoundTripper) http.RoundTripper { + transport, ok := rt.(*http.Transport) + if !ok { + return rt + } + + transport.DialContext = (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext + + // Hold open more internal idle connections + transport.MaxIdleConnsPerHost = 100 + if c.MaxIdleConnsPerHost > 0 { + transport.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost + } + + if c.WrapTransport == nil { + return transport + + } + return c.WrapTransport(transport) +} + +// ClientConnectionOverrides allows overriding values for rest.Config not held in a kubeconfig. Most commonly used +// for QPS. Empty values are not used. +type ClientConnectionOverrides struct { + configv1.ClientConnectionOverrides + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle (keep-alive) connections to keep per-host:port. + // If zero, DefaultMaxIdleConnsPerHost is used. + // TODO roll this into the connection overrides in api + MaxIdleConnsPerHost int +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go new file mode 100644 index 00000000000..c2ddfd99566 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status.go @@ -0,0 +1,140 @@ +package v1helpers + +import ( + "bytes" + "fmt" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/json" + + configv1 "github.com/openshift/api/config/v1" +) + +// SetStatusCondition sets the corresponding condition in conditions to newCondition. +func SetStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, newCondition configv1.ClusterOperatorStatusCondition) { + if conditions == nil { + conditions = &[]configv1.ClusterOperatorStatusCondition{} + } + existingCondition := FindStatusCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// RemoveStatusCondition removes the corresponding conditionType from conditions. +func RemoveStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) { + if conditions == nil { + conditions = &[]configv1.ClusterOperatorStatusCondition{} + } + newConditions := []configv1.ClusterOperatorStatusCondition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +// FindStatusCondition finds the conditionType in conditions. +func FindStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +// GetStatusDiff returns a string representing change in condition status in human readable form. 
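+// For example (hypothetical conditions), an Available condition that flips from
+// False to True with message "ready" contributes:
+//
+//	Available changed from False to True ("ready")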
+func GetStatusDiff(oldStatus configv1.ClusterOperatorStatus, newStatus configv1.ClusterOperatorStatus) string { + messages := []string{} + for _, newCondition := range newStatus.Conditions { + existingStatusCondition := FindStatusCondition(oldStatus.Conditions, newCondition.Type) + if existingStatusCondition == nil { + messages = append(messages, fmt.Sprintf("%s set to %s (%q)", newCondition.Type, newCondition.Status, newCondition.Message)) + continue + } + if existingStatusCondition.Status != newCondition.Status { + messages = append(messages, fmt.Sprintf("%s changed from %s to %s (%q)", existingStatusCondition.Type, existingStatusCondition.Status, newCondition.Status, newCondition.Message)) + continue + } + if existingStatusCondition.Message != newCondition.Message { + messages = append(messages, fmt.Sprintf("%s message changed from %q to %q", existingStatusCondition.Type, existingStatusCondition.Message, newCondition.Message)) + } + } + for _, oldCondition := range oldStatus.Conditions { + // This should not happen. It means we removed old condition entirely instead of just changing its status + if c := FindStatusCondition(newStatus.Conditions, oldCondition.Type); c == nil { + messages = append(messages, fmt.Sprintf("%s was removed", oldCondition.Type)) + } + } + + if !equality.Semantic.DeepEqual(oldStatus.RelatedObjects, newStatus.RelatedObjects) { + messages = append(messages, fmt.Sprintf("status.relatedObjects changed from %q to %q", oldStatus.RelatedObjects, newStatus.RelatedObjects)) + } + if !equality.Semantic.DeepEqual(oldStatus.Extension, newStatus.Extension) { + messages = append(messages, fmt.Sprintf("status.extension changed from %q to %q", oldStatus.Extension, newStatus.Extension)) + } + + if len(messages) == 0 { + // ignore errors + originalJSON := &bytes.Buffer{} + json.NewEncoder(originalJSON).Encode(oldStatus) + newJSON := &bytes.Buffer{} + json.NewEncoder(newJSON).Encode(newStatus) + messages = append(messages, diff.StringDiff(originalJSON.String(), newJSON.String())) + } + + return strings.Join(messages, ",") +} + +// IsStatusConditionTrue returns true when the conditionType is present and set to `configv1.ConditionTrue` +func IsStatusConditionTrue(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionTrue) +} + +// IsStatusConditionFalse returns true when the conditionType is present and set to `configv1.ConditionFalse` +func IsStatusConditionFalse(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType) bool { + return IsStatusConditionPresentAndEqual(conditions, conditionType, configv1.ConditionFalse) +} + +// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status. +func IsStatusConditionPresentAndEqual(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status configv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} + +// IsStatusConditionNotIn returns true when the conditionType does not match the status. 
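+// For example, IsStatusConditionNotIn(conds, "Degraded", configv1.ConditionTrue)
+// is true unless Degraded exists with status True; an absent condition counts
+// as "not in" any status.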
+func IsStatusConditionNotIn(conditions []configv1.ClusterOperatorStatusCondition, conditionType configv1.ClusterStatusConditionType, status ...configv1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + for _, s := range status { + if s == condition.Status { + return false + } + } + return true + } + } + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status_test.go b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status_test.go new file mode 100644 index 00000000000..b8e72fee5b6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers/status_test.go @@ -0,0 +1,85 @@ +package v1helpers + +import ( + "reflect" + "strings" + "testing" + + configv1 "github.com/openshift/api/config/v1" +) + +func TestGetStatusConditionDiff(t *testing.T) { + tests := []struct { + name string + newConditions []configv1.ClusterOperatorStatusCondition + oldConditions []configv1.ClusterOperatorStatusCondition + expectedMessages []string + }{ + { + name: "new condition", + newConditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.RetrievedUpdates, + Status: configv1.ConditionTrue, + Message: "test", + }, + }, + expectedMessages: []string{`RetrievedUpdates set to True ("test")`}, + }, + { + name: "condition status change", + newConditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.RetrievedUpdates, + Status: configv1.ConditionFalse, + Message: "test", + }, + }, + oldConditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.RetrievedUpdates, + Status: configv1.ConditionTrue, + Message: "test", + }, + }, + expectedMessages: []string{`RetrievedUpdates changed from True to False ("test")`}, + }, + { + name: "condition message change", + newConditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.RetrievedUpdates, + Status: configv1.ConditionTrue, + Message: "foo", + }, + }, + oldConditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.RetrievedUpdates, + Status: configv1.ConditionTrue, + Message: "bar", + }, + }, + expectedMessages: []string{`RetrievedUpdates message changed from "bar" to "foo"`}, + }, + { + name: "condition message deleted", + oldConditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.RetrievedUpdates, + Status: configv1.ConditionTrue, + Message: "test", + }, + }, + expectedMessages: []string{"RetrievedUpdates was removed"}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := GetStatusDiff(configv1.ClusterOperatorStatus{Conditions: test.oldConditions}, configv1.ClusterOperatorStatus{Conditions: test.newConditions}) + if !reflect.DeepEqual(test.expectedMessages, strings.Split(result, ",")) { + t.Errorf("expected %#v, got %#v", test.expectedMessages, result) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go b/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go new file mode 100644 index 00000000000..0bd77a7f467 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/configdefaults/config_default.go @@ -0,0 +1,81 @@ +package configdefaults + +import ( + "time" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" +) + +func DefaultString(target *string, defaultVal string) { + if len(*target) == 0 { + *target = defaultVal + } 
+} + +func DefaultInt(target *int, defaultVal int) { + if *target == 0 { + *target = defaultVal + } +} + +func DefaultMetaDuration(target *time.Duration, defaultVal time.Duration) { + if *target == 0 { + *target = defaultVal + } +} + +func DefaultStringSlice(target *[]string, defaultVal []string) { + if len(*target) == 0 { + *target = defaultVal + } +} + +func SetRecommendedHTTPServingInfoDefaults(config *configv1.HTTPServingInfo) { + if config.MaxRequestsInFlight == 0 { + config.MaxRequestsInFlight = 3000 + } + if config.RequestTimeoutSeconds == 0 { + config.RequestTimeoutSeconds = 60 * 60 // one hour + } + + SetRecommendedServingInfoDefaults(&config.ServingInfo) +} + +func SetRecommendedServingInfoDefaults(config *configv1.ServingInfo) { + DefaultString(&config.BindAddress, "0.0.0.0:8443") + DefaultString(&config.BindNetwork, "tcp4") + DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/serving-cert/tls.key") + DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/serving-cert/tls.crt") + DefaultString(&config.ClientCA, "/var/run/configmaps/client-ca/ca-bundle.crt") + DefaultString(&config.MinTLSVersion, crypto.TLSVersionToNameOrDie(crypto.DefaultTLSVersion())) + + if len(config.CipherSuites) == 0 { + config.CipherSuites = crypto.CipherSuitesToNamesOrDie(crypto.DefaultCiphers()) + } +} + +func SetRecommendedGenericAPIServerConfigDefaults(config *configv1.GenericAPIServerConfig) { + SetRecommendedHTTPServingInfoDefaults(&config.ServingInfo) + SetRecommendedEtcdConnectionInfoDefaults(&config.StorageConfig.EtcdConnectionInfo) + SetRecommendedKubeClientConfigDefaults(&config.KubeClientConfig) +} + +func SetRecommendedEtcdConnectionInfoDefaults(config *configv1.EtcdConnectionInfo) { + DefaultStringSlice(&config.URLs, []string{"https://etcd.kube-system.svc:2379"}) + DefaultString(&config.CertInfo.KeyFile, "/var/run/secrets/etcd-client/tls.key") + DefaultString(&config.CertInfo.CertFile, "/var/run/secrets/etcd-client/tls.crt") + DefaultString(&config.CA, "/var/run/configmaps/etcd-serving-ca/ca-bundle.crt") +} + +func SetRecommendedKubeClientConfigDefaults(config *configv1.KubeClientConfig) { + // these are historical values + if config.ConnectionOverrides.QPS <= 0 { + config.ConnectionOverrides.QPS = 150.0 + } + if config.ConnectionOverrides.Burst <= 0 { + config.ConnectionOverrides.Burst = 300 + } + DefaultString(&config.ConnectionOverrides.AcceptContentTypes, "application/vnd.kubernetes.protobuf,application/json") + DefaultString(&config.ConnectionOverrides.ContentType, "application/vnd.kubernetes.protobuf") +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go new file mode 100644 index 00000000000..f28ef543f16 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/client.go @@ -0,0 +1,71 @@ +package helpers + +import ( + "io/ioutil" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/config/client" +) + +// TODO this file needs to collapse with pkg/config/client. We cannot safely delegate from this file because this one +// TODO uses JSON and other uses protobuf. + +// GetKubeClientConfig loads in-cluster config if kubeConfigFile is empty or the file if not, then applies overrides. 
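+// Illustrative use (path is hypothetical):
+//
+//	cfg, err := GetKubeClientConfig(configv1.KubeClientConfig{KubeConfig: "/etc/kubeconfig"})
+//
+// An empty KubeConfig falls back to the in-cluster service-account config.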
+func GetKubeClientConfig(kubeClientConnection configv1.KubeClientConfig) (*rest.Config, error) { + return GetKubeConfigOrInClusterConfig(kubeClientConnection.KubeConfig, kubeClientConnection.ConnectionOverrides) +} + +// GetKubeConfigOrInClusterConfig loads in-cluster config if kubeConfigFile is empty or the file if not, +// then applies overrides. +func GetKubeConfigOrInClusterConfig(kubeConfigFile string, overrides configv1.ClientConnectionOverrides) (*rest.Config, error) { + if len(kubeConfigFile) > 0 { + return GetClientConfig(kubeConfigFile, overrides) + } + + clientConfig, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport + + return clientConfig, nil +} + +func GetClientConfig(kubeConfigFile string, overrides configv1.ClientConnectionOverrides) (*rest.Config, error) { + kubeConfigBytes, err := ioutil.ReadFile(kubeConfigFile) + if err != nil { + return nil, err + } + kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) + if err != nil { + return nil, err + } + clientConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, err + } + applyClientConnectionOverrides(overrides, clientConfig) + clientConfig.WrapTransport = client.ClientTransportOverrides{WrapTransport: clientConfig.WrapTransport}.DefaultClientTransport + + return clientConfig, nil +} + +// applyClientConnectionOverrides updates a kubeConfig with the overrides from the config. +func applyClientConnectionOverrides(overrides configv1.ClientConnectionOverrides, kubeConfig *rest.Config) { + if overrides.QPS != 0 { + kubeConfig.QPS = overrides.QPS + } + if overrides.Burst != 0 { + kubeConfig.Burst = int(overrides.Burst) + } + if len(overrides.AcceptContentTypes) != 0 { + kubeConfig.ContentConfig.AcceptContentTypes = overrides.AcceptContentTypes + } + if len(overrides.ContentType) != 0 { + kubeConfig.ContentConfig.ContentType = overrides.ContentType + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go new file mode 100644 index 00000000000..21d4d24f173 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/config_refs.go @@ -0,0 +1,145 @@ +package helpers + +import ( + "strings" + + configv1 "github.com/openshift/api/config/v1" +) + +func GetHTTPServingInfoFileReferences(config *configv1.HTTPServingInfo) []*string { + if config == nil { + return []*string{} + } + + return GetServingInfoFileReferences(&config.ServingInfo) +} + +func GetServingInfoFileReferences(config *configv1.ServingInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) 
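For the client helpers above, the intended call pattern looks like the sketch below; an empty kubeconfig path falls through to rest.InClusterConfig, so this only succeeds inside a pod, and only non-zero overrides are applied:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/helpers"
)

func main() {
	overrides := configv1.ClientConnectionOverrides{QPS: 50, Burst: 100}

	cfg, err := helpers.GetKubeConfigOrInClusterConfig("", overrides)
	if err != nil {
		fmt.Println("not running in a cluster:", err)
		return
	}
	// QPS/Burst come from the overrides; the empty content-type overrides were ignored.
	fmt.Println(cfg.QPS, cfg.Burst)
}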
+ refs = append(refs, &config.ClientCA) + for i := range config.NamedCertificates { + refs = append(refs, &config.NamedCertificates[i].CertFile) + refs = append(refs, &config.NamedCertificates[i].KeyFile) + } + + return refs +} + +func GetCertFileReferences(config *configv1.CertInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.CertFile) + refs = append(refs, &config.KeyFile) + return refs +} + +func GetRemoteConnectionInfoFileReferences(config *configv1.RemoteConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetEtcdConnectionInfoFileReferences(config *configv1.EtcdConnectionInfo) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetCertFileReferences(&config.CertInfo)...) + refs = append(refs, &config.CA) + return refs +} + +func GetStringSourceFileReferences(s *configv1.StringSource) []*string { + if s == nil { + return []*string{} + } + + return []*string{ + &s.File, + &s.KeyFile, + } +} + +func GetAdmissionPluginConfigFileReferences(config *configv1.AdmissionPluginConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.Location) + return refs +} + +func GetAuditConfigFileReferences(config *configv1.AuditConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.PolicyFile) + refs = append(refs, &config.AuditFilePath) + return refs +} + +func GetKubeClientConfigFileReferences(config *configv1.KubeClientConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, &config.KubeConfig) + return refs +} + +func GetGenericAPIServerConfigFileReferences(config *configv1.GenericAPIServerConfig) []*string { + if config == nil { + return []*string{} + } + + refs := []*string{} + refs = append(refs, GetHTTPServingInfoFileReferences(&config.ServingInfo)...) + refs = append(refs, GetEtcdConnectionInfoFileReferences(&config.StorageConfig.EtcdConnectionInfo)...) + refs = append(refs, GetAuditConfigFileReferences(&config.AuditConfig)...) + refs = append(refs, GetKubeClientConfigFileReferences(&config.KubeClientConfig)...) + + // TODO admission config file resolution is currently broken. + //for k := range config.AdmissionPluginConfig { + // refs = append(refs, GetAdmissionPluginConfigReferences(&(config.AdmissionPluginConfig[k]))...) + //} + return refs +} + +func GetFlagsWithFileExtensionsFileReferences(args map[string][]string) []*string { + if args == nil { + return []*string{} + } + + refs := []*string{} + for key, s := range args { + if len(s) == 0 { + continue + } + if !strings.HasSuffix(key, "-file") && !strings.HasSuffix(key, "-dir") { + continue + } + for i := range s { + refs = append(refs, &s[i]) + } + } + + return refs +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go new file mode 100644 index 00000000000..fa7e4b46510 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/general.go @@ -0,0 +1,64 @@ +package helpers + +import ( + "fmt" + "path/filepath" + "strings" +) + +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory. 
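All of these Get*FileReferences helpers share one design: they return pointers into the config struct so a later pass can rewrite relative paths in place, typically via the ResolvePaths helper whose definition follows. A sketch with placeholder paths:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/helpers"
)

func main() {
	info := configv1.ServingInfo{
		BindAddress: "0.0.0.0:8443",
		CertInfo:    configv1.CertInfo{CertFile: "tls.crt", KeyFile: "tls.key"},
	}

	// refs point back into info, so resolving mutates the config itself.
	refs := helpers.GetServingInfoFileReferences(&info)
	if err := helpers.ResolvePaths(refs, "/etc/serving"); err != nil {
		panic(err)
	}
	fmt.Println(info.CertFile, info.KeyFile) // /etc/serving/tls.crt /etc/serving/tls.key
}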
+// Empty and "-" paths are never resolved. +func ResolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths, or "-" + if len(*ref) > 0 && *ref != "-" { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +func makeRelative(path, base string) (string, error) { + if len(path) > 0 && path != "-" { + rel, err := filepath.Rel(base, path) + if err != nil { + return path, err + } + return rel, nil + } + return path, nil +} + +// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. +// Any path requiring a backstep is left as-is as long it is absolute. Any non-absolute path that can't be relativized produces an error +// Empty and "-" paths are never relativized. +func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths, or "-" + if len(*ref) > 0 && *ref != "-" { + rel, err := makeRelative(*ref, base) + if err != nil { + return err + } + + if rel == "-" { + rel = "./-" + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/general_test.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/general_test.go new file mode 100644 index 00000000000..65c76c32cc5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/general_test.go @@ -0,0 +1,54 @@ +package helpers + +import ( + "fmt" + "testing" +) + +func TestResolvePaths(t *testing.T) { + tests := []struct { + ref, base, expected string + }{ + {"", "/foo", ""}, + {"-", "/foo", "-"}, + {"bar", "/foo", "/foo/bar"}, + {"..", "/foo", "/"}, + {"/bar", "/foo", "/bar"}, + {"bar/-", "/foo", "/foo/bar/-"}, + {"./-", "/foo", "/foo/-"}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%s onto %s", tt.ref, tt.base), func(t *testing.T) { + x := tt.ref + if err := ResolvePaths([]*string{&x}, tt.base); err != nil { + t.Errorf("unexpected error: %v", err) + } + if x != tt.expected { + t.Errorf("unexpected result %q, expected %q", x, tt.expected) + } + }) + } +} + +func TestRelativizePathWithNoBacksteps(t *testing.T) { + tests := []struct { + ref, base, expected string + }{ + {"/foo/", "/foo", "."}, + {"-", "/foo", "-"}, + {"/foo/bar", "/foo", "bar"}, + {"/abc", "/foo", "/abc"}, + {"/foo/-", "/foo", "./-"}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("%s onto %s", tt.ref, tt.base), func(t *testing.T) { + x := tt.ref + if err := RelativizePathWithNoBacksteps([]*string{&x}, tt.base); err != nil { + t.Errorf("unexpected error: %v", err) + } + if x != tt.expected { + t.Errorf("unexpected result %q, expected %q", x, tt.expected) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go new file mode 100644 index 00000000000..0c68ee27c05 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go @@ -0,0 +1,167 @@ +package helpers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + kyaml 
"k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog" + "sigs.k8s.io/yaml" +) + +// InstallFunc is the "normal" function for installing scheme +type InstallFunc func(scheme *runtime.Scheme) error + +// ReadYAMLToInternal reads content of a reader and returns the runtime.Object that matches it. It chooses the match from +// the scheme installation that you provide. It converts to internal for you. +func ReadYAMLToInternal(reader io.Reader, schemeFns ...InstallFunc) (runtime.Object, error) { + if reader == nil || reflect.ValueOf(reader).IsNil() { + return nil, nil + } + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + jsonData, err := kyaml.ToJSON(data) + if err != nil { + // maybe we were already json + jsonData = data + } + + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) + + obj, err := runtime.Decode(codec, jsonData) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", jsonData, err) + } + // make sure there are no extra fields in jsonData + if err := strictDecodeCheck(jsonData, obj, scheme); err != nil { + return nil, err + } + + return obj, nil +} + +// ReadYAML reads content of a reader and returns the runtime.Object that matches it. It chooses the match from +// the scheme installation that you provide. It does not convert and it does not default. +func ReadYAML(reader io.Reader, schemeFns ...InstallFunc) (runtime.Object, error) { + if reader == nil || reflect.ValueOf(reader).IsNil() { + return nil, nil + } + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + jsonData, err := kyaml.ToJSON(data) + if err != nil { + // maybe we were already json + jsonData = data + } + + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).UniversalDeserializer() + + obj, err := runtime.Decode(codec, jsonData) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", jsonData, err) + } + // make sure there are no extra fields in jsonData + if err := strictDecodeCheck(jsonData, obj, scheme); err != nil { + return nil, err + } + + return obj, nil +} + +// TODO: we ultimately want a better decoder for JSON that allows us exact line numbers and better +// surrounding text description. This should be removed / replaced when that happens. 
+func captureSurroundingJSONForError(prefix string, data []byte, err error) error { + if syntaxErr, ok := err.(*json.SyntaxError); err != nil && ok { + offset := syntaxErr.Offset + begin := offset - 20 + if begin < 0 { + begin = 0 + } + end := offset + 20 + if end > int64(len(data)) { + end = int64(len(data)) + } + return fmt.Errorf("%s%v (found near '%s')", prefix, err, string(data[begin:end])) + } + if err != nil { + return fmt.Errorf("%s%v", prefix, err) + } + return err +} + +// strictDecodeCheck fails decodes when jsonData contains fields not included in the external version of obj +func strictDecodeCheck(jsonData []byte, obj runtime.Object, scheme *runtime.Scheme) error { + out, err := getExternalZeroValue(obj, scheme) // we need the external version of obj as that has the correct JSON struct tags + if err != nil { + klog.Errorf("Encountered config error %v in object %T, raw JSON:\n%s", err, obj, string(jsonData)) // TODO just return the error and die + // never error for now, we need to determine a safe way to make this check fatal + return nil + } + d := json.NewDecoder(bytes.NewReader(jsonData)) + d.DisallowUnknownFields() + // note that we only care about the error, out is discarded + if err := d.Decode(out); err != nil { + klog.Errorf("Encountered config error %v in object %T, raw JSON:\n%s", err, obj, string(jsonData)) // TODO just return the error and die + } + // never error for now, we need to determine a safe way to make this check fatal + return nil +} + +// getExternalZeroValue returns the zero value of the external version of obj +func getExternalZeroValue(obj runtime.Object, scheme *runtime.Scheme) (runtime.Object, error) { + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { // should never happen + return nil, fmt.Errorf("no gvks found for %#v", obj) + } + return scheme.New(gvks[0]) +} + +// WriteYAML serializes a yaml file based on the scheme functions provided +func WriteYAML(obj runtime.Object, schemeFns ...InstallFunc) ([]byte, error) { + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) + + json, err := runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + + content, err := yaml.JSONToYAML(json) + if err != nil { + return nil, err + } + return content, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go new file mode 100644 index 00000000000..7bdcc30568d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -0,0 +1,103 @@ +package leaderelection + +import ( + "fmt" + "io/ioutil" + "strings" + "time" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/client-go/kubernetes" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + "k8s.io/client-go/tools/record" + + configv1 "github.com/openshift/api/config/v1" +) + +// ToConfigMapLeaderElection returns a leader election config that you just need to fill in the Callback for. Don't forget the callbacks! 
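strictDecodeCheck above leans on encoding/json's DisallowUnknownFields; stripped of the scheme plumbing, the guard is just this:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type Config struct {
	Name string `json:"name"`
}

func main() {
	data := []byte(`{"name":"a","unknown":true}`)

	d := json.NewDecoder(bytes.NewReader(data))
	// Unknown keys become decode errors instead of being silently dropped.
	d.DisallowUnknownFields()

	var c Config
	fmt.Println(d.Decode(&c)) // json: unknown field "unknown"
}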
+func ToConfigMapLeaderElection(clientConfig *rest.Config, config configv1.LeaderElection, component, identity string) (leaderelection.LeaderElectionConfig, error) { + kubeClient, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return leaderelection.LeaderElectionConfig{}, err + } + + if len(identity) == 0 { + identity = string(uuid.NewUUID()) + } + if len(config.Namespace) == 0 { + return leaderelection.LeaderElectionConfig{}, fmt.Errorf("namespace may not be empty") + } + if len(config.Name) == 0 { + return leaderelection.LeaderElectionConfig{}, fmt.Errorf("name may not be empty") + } + + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(klog.Infof) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")}) + eventRecorder := eventBroadcaster.NewRecorder(clientgoscheme.Scheme, corev1.EventSource{Component: component}) + rl, err := resourcelock.New( + resourcelock.ConfigMapsResourceLock, + config.Namespace, + config.Name, + kubeClient.CoreV1(), + kubeClient.CoordinationV1(), + resourcelock.ResourceLockConfig{ + Identity: identity, + EventRecorder: eventRecorder, + }) + if err != nil { + return leaderelection.LeaderElectionConfig{}, err + } + + return leaderelection.LeaderElectionConfig{ + Lock: rl, + LeaseDuration: config.LeaseDuration.Duration, + RenewDeadline: config.RenewDeadline.Duration, + RetryPeriod: config.RetryPeriod.Duration, + Callbacks: leaderelection.LeaderCallbacks{ + OnStoppedLeading: func() { + klog.Fatalf("leaderelection lost") + }, + }, + }, nil +} + +// LeaderElectionDefaulting applies what we think are reasonable defaults. It does not mutate the original. +// We do defaulting outside the API so that we can change over time and know whether the user intended to override our values +// as opposed to simply getting the defaulted serialization at some point. 
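Putting the helper above together with LeaderElectionDefaulting (defined just below); the namespace, lock name, and component are placeholders, and OnStartedLeading is where the caller's controllers actually start:

package main

import (
	"context"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/klog"

	configv1 "github.com/openshift/api/config/v1"
	libleaderelection "github.com/openshift/library-go/pkg/config/leaderelection"
)

func main() {
	clientConfig, err := rest.InClusterConfig()
	if err != nil {
		klog.Fatal(err)
	}

	le := libleaderelection.LeaderElectionDefaulting(configv1.LeaderElection{}, "example-ns", "example-lock")
	elector, err := libleaderelection.ToConfigMapLeaderElection(clientConfig, le, "example-operator", "")
	if err != nil {
		klog.Fatal(err)
	}

	// OnStoppedLeading is already wired to klog.Fatalf; supply the leading-side work.
	elector.Callbacks.OnStartedLeading = func(ctx context.Context) {
		<-ctx.Done() // start controllers here instead of just blocking
	}
	leaderelection.RunOrDie(context.TODO(), elector)
}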
+func LeaderElectionDefaulting(config configv1.LeaderElection, defaultNamespace, defaultName string) configv1.LeaderElection { + ret := *(&config).DeepCopy() + + if ret.LeaseDuration.Duration == 0 { + ret.LeaseDuration.Duration = 60 * time.Second + } + if ret.RenewDeadline.Duration == 0 { + ret.RenewDeadline.Duration = 35 * time.Second + } + if ret.RetryPeriod.Duration == 0 { + ret.RetryPeriod.Duration = 10 * time.Second + } + if len(ret.Namespace) == 0 { + if len(defaultNamespace) > 0 { + ret.Namespace = defaultNamespace + } else { + // Fall back to the namespace associated with the service account token, if available + if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + ret.Namespace = ns + } + } + } + } + if len(ret.Name) == 0 { + ret.Name = defaultName + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/options.go b/vendor/github.com/openshift/library-go/pkg/config/serving/options.go new file mode 100644 index 00000000000..bb710454df6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/options.go @@ -0,0 +1,51 @@ +package serving + +import ( + "fmt" + "net" + "strconv" + + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" + utilflag "k8s.io/component-base/cli/flag" + + configv1 "github.com/openshift/api/config/v1" +) + +func ToServingOptions(servingInfo configv1.HTTPServingInfo) (*genericapiserveroptions.SecureServingOptionsWithLoopback, error) { + host, portString, err := net.SplitHostPort(servingInfo.BindAddress) + if err != nil { + return nil, fmt.Errorf("bindAddress is invalid: %v", err) + } + port, err := strconv.Atoi(portString) + if err != nil { + return nil, fmt.Errorf("bindAddress is invalid: %v", err) + } + if t := net.ParseIP(host); t == nil { + return nil, fmt.Errorf("bindAddress is invalid: %v", "not an IP") + } + + servingOptions := genericapiserveroptions.NewSecureServingOptions() + servingOptions.BindAddress = net.ParseIP(host) + servingOptions.BindPort = port + servingOptions.BindNetwork = servingInfo.BindNetwork + servingOptions.ServerCert.CertKey.CertFile = servingInfo.CertFile + servingOptions.ServerCert.CertKey.KeyFile = servingInfo.KeyFile + servingOptions.CipherSuites = servingInfo.CipherSuites + servingOptions.MinTLSVersion = servingInfo.MinTLSVersion + + for _, namedCert := range servingInfo.NamedCertificates { + genericNamedCertKey := utilflag.NamedCertKey{ + Names: namedCert.Names, + CertFile: namedCert.CertFile, + KeyFile: namedCert.KeyFile, + } + + servingOptions.SNICertKeys = append(servingOptions.SNICertKeys, genericNamedCertKey) + } + + // TODO sort out what we should do here + //servingOptions.HTTP2MaxStreamsPerConnection = ?? 
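Worth noting about ToServingOptions above: the host part of bindAddress must be an IP literal, not a hostname, or the conversion fails. A usage sketch with placeholder certificate paths:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/serving"
)

func main() {
	info := configv1.HTTPServingInfo{}
	info.BindAddress = "0.0.0.0:8443"
	info.BindNetwork = "tcp4"
	info.CertFile = "/var/run/secrets/serving-cert/tls.crt" // placeholder paths
	info.KeyFile = "/var/run/secrets/serving-cert/tls.key"

	opts, err := serving.ToServingOptions(info)
	if err != nil {
		panic(err) // e.g. `bindAddress is invalid: not an IP` for a hostname
	}
	fmt.Println(opts.BindAddress, opts.BindPort) // 0.0.0.0 8443
}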
+ + servingOptionsWithLoopback := servingOptions.WithLoopback() + return servingOptionsWithLoopback, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go new file mode 100644 index 00000000000..3869d5c2e92 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go @@ -0,0 +1,78 @@ +package serving + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" + genericapiserver "k8s.io/apiserver/pkg/server" + genericapiserveroptions "k8s.io/apiserver/pkg/server/options" + "k8s.io/klog" + + configv1 "github.com/openshift/api/config/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" +) + +func ToServerConfig(ctx context.Context, servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization, + kubeConfigFile string) (*genericapiserver.Config, error) { + scheme := runtime.NewScheme() + metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) + config := genericapiserver.NewConfig(serializer.NewCodecFactory(scheme)) + + servingOptions, err := ToServingOptions(servingInfo) + if err != nil { + return nil, err + } + + if err := servingOptions.ApplyTo(&config.SecureServing, &config.LoopbackClientConfig); err != nil { + return nil, err + } + + var lastApplyErr error + + pollCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + if !authenticationConfig.Disabled { + authenticationOptions := genericapiserveroptions.NewDelegatingAuthenticationOptions() + authenticationOptions.RemoteKubeConfigFile = kubeConfigFile + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. + err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authenticationOptions.ApplyTo(&config.Authentication, config.SecureServing, config.OpenAPIConfig) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authentication (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr + } + } + + if !authorizationConfig.Disabled { + authorizationOptions := genericapiserveroptions.NewDelegatingAuthorizationOptions() + authorizationOptions.RemoteKubeConfigFile = kubeConfigFile + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. 
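The apply loops in ToServerConfig keep the real failure in lastApplyErr and only surface it if the poll gives up; the same retry shape in isolation:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	var lastErr error
	attempts := 0
	// Return false with a nil error to retry; keep the real error in lastErr.
	err := wait.PollImmediateUntil(1*time.Second, func() (bool, error) {
		attempts++
		if attempts < 3 {
			lastErr = fmt.Errorf("connection refused") // simulated transient failure
			return false, nil
		}
		lastErr = nil
		return true, nil
	}, ctx.Done())
	if err != nil {
		fmt.Println(lastErr) // surface the last real failure, not the timeout
		return
	}
	fmt.Println("applied after", attempts, "attempts")
}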
+ err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authorizationOptions.ApplyTo(&config.Authorization) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authorization (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr + } + } + + return config, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/validation/general.go b/vendor/github.com/openshift/library-go/pkg/config/validation/general.go new file mode 100644 index 00000000000..3a5dcd0b7fe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/validation/general.go @@ -0,0 +1,130 @@ +package validation + +import ( + "fmt" + "net" + "net/url" + "os" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +type ValidationResults struct { + Warnings field.ErrorList + Errors field.ErrorList +} + +func (r *ValidationResults) Append(additionalResults ValidationResults) { + r.AddErrors(additionalResults.Errors...) + r.AddWarnings(additionalResults.Warnings...) +} + +func (r *ValidationResults) AddErrors(errors ...*field.Error) { + if len(errors) == 0 { + return + } + r.Errors = append(r.Errors, errors...) +} + +func (r *ValidationResults) AddWarnings(warnings ...*field.Error) { + if len(warnings) == 0 { + return + } + r.Warnings = append(r.Warnings, warnings...) +} + +func ValidateHostPort(value string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(value) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if _, _, err := net.SplitHostPort(value); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, value, "must be a host:port")) + } + + return allErrs +} + +func ValidateFile(path string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(path) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else if _, err := os.Stat(path); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, path, fmt.Sprintf("could not read file: %v", err))) + } + + return allErrs +} + +func ValidateSecureURL(urlString string, fldPath *field.Path) (*url.URL, field.ErrorList) { + url, urlErrs := ValidateURL(urlString, fldPath) + if len(urlErrs) == 0 && url.Scheme != "https" { + urlErrs = append(urlErrs, field.Invalid(fldPath, urlString, "must use https scheme")) + } + return url, urlErrs +} + +func ValidateURL(urlString string, fldPath *field.Path) (*url.URL, field.ErrorList) { + allErrs := field.ErrorList{} + + urlObj, err := url.Parse(urlString) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must be a valid URL")) + return nil, allErrs + } + if len(urlObj.Scheme) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must contain a scheme (e.g. 
https://)")) + } + if len(urlObj.Host) == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, urlString, "must contain a host")) + } + return urlObj, allErrs +} + +func ValidateDir(path string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(path) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "")) + } else { + fileInfo, err := os.Stat(path) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath, path, fmt.Sprintf("could not read info: %v", err))) + } else if !fileInfo.IsDir() { + allErrs = append(allErrs, field.Invalid(fldPath, path, "not a directory")) + } + } + + return allErrs +} + +// HostnameMatchSpecCandidates returns a list of match specs that would match the provided hostname +// Returns nil if len(hostname) == 0 +func HostnameMatchSpecCandidates(hostname string) []string { + if len(hostname) == 0 { + return nil + } + + // Exact match has priority + candidates := []string{hostname} + + // Replace successive labels in the name with wildcards, to require an exact match on number of + // path segments, because certificates cannot wildcard multiple levels of subdomains + // + // This is primarily to be consistent with tls.Config#getCertificate implementation + // + // It using a cert signed for *.foo.example.com and *.bar.example.com by specifying the name *.*.example.com + labels := strings.Split(hostname, ".") + for i := range labels { + labels[i] = "*" + candidates = append(candidates, strings.Join(labels, ".")) + } + return candidates +} + +// HostnameMatches returns true if the given hostname is matched by the given matchSpec +func HostnameMatches(hostname string, matchSpec string) bool { + return sets.NewString(HostnameMatchSpecCandidates(hostname)...).Has(matchSpec) +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go b/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go new file mode 100644 index 00000000000..947f5c91482 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/validation/serving_info.go @@ -0,0 +1,174 @@ +package validation + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + utilvalidation "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/crypto" +) + +func ValidateHTTPServingInfo(info configv1.HTTPServingInfo, fldPath *field.Path) ValidationResults { + validationResults := ValidationResults{} + + validationResults.Append(ValidateServingInfo(info.ServingInfo, true, fldPath)) + + if info.MaxRequestsInFlight < 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("maxRequestsInFlight"), info.MaxRequestsInFlight, "must be zero (no limit) or greater")) + } + + if info.RequestTimeoutSeconds < -1 { + validationResults.AddErrors(field.Invalid(fldPath.Child("requestTimeoutSeconds"), info.RequestTimeoutSeconds, "must be -1 (no timeout), 0 (default timeout), or greater")) + } + + return validationResults +} + +func ValidateServingInfo(info configv1.ServingInfo, certificatesRequired bool, fldPath *field.Path) ValidationResults { + validationResults := ValidationResults{} + + validationResults.AddErrors(ValidateHostPort(info.BindAddress, fldPath.Child("bindAddress"))...) + validationResults.AddErrors(ValidateCertInfo(info.CertInfo, certificatesRequired, fldPath)...) 
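The candidate expansion above gives wildcards the same single-label semantics as TLS certificate matching, so a match spec never spans subdomain levels:

package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/config/validation"
)

func main() {
	// A wildcard covers exactly one label.
	fmt.Println(validation.HostnameMatches("api.example.com", "*.example.com"))   // true
	fmt.Println(validation.HostnameMatches("a.b.example.com", "*.example.com"))   // false
	fmt.Println(validation.HostnameMatches("a.b.example.com", "*.*.example.com")) // true
}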
+ + if len(info.NamedCertificates) > 0 && len(info.CertFile) == 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("namedCertificates"), "", "a default certificate and key is required in certFile/keyFile in order to use namedCertificates")) + } + + validationResults.Append(ValidateNamedCertificates(fldPath.Child("namedCertificates"), info.NamedCertificates)) + + switch info.BindNetwork { + case "tcp", "tcp4", "tcp6": + default: + validationResults.AddErrors(field.Invalid(fldPath.Child("bindNetwork"), info.BindNetwork, "must be 'tcp', 'tcp4', or 'tcp6'")) + } + + if len(info.CertFile) > 0 { + if len(info.ClientCA) > 0 { + validationResults.AddErrors(ValidateFile(info.ClientCA, fldPath.Child("clientCA"))...) + } + } else { + if certificatesRequired && len(info.ClientCA) > 0 { + validationResults.AddErrors(field.Invalid(fldPath.Child("clientCA"), info.ClientCA, "cannot specify a clientCA without a certFile")) + } + } + + if _, err := crypto.TLSVersion(info.MinTLSVersion); err != nil { + validationResults.AddErrors(field.NotSupported(fldPath.Child("minTLSVersion"), info.MinTLSVersion, crypto.ValidTLSVersions())) + } + for i, cipher := range info.CipherSuites { + if _, err := crypto.CipherSuite(cipher); err != nil { + validationResults.AddErrors(field.NotSupported(fldPath.Child("cipherSuites").Index(i), cipher, crypto.ValidCipherSuites())) + } + } + + return validationResults +} + +func ValidateNamedCertificates(fldPath *field.Path, namedCertificates []configv1.NamedCertificate) ValidationResults { + validationResults := ValidationResults{} + + takenNames := sets.NewString() + for i, namedCertificate := range namedCertificates { + idxPath := fldPath.Index(i) + + certDNSNames := []string{} + if len(namedCertificate.CertFile) == 0 { + validationResults.AddErrors(field.Required(idxPath.Child("certInfo"), "")) + } else if certInfoErrors := ValidateCertInfo(namedCertificate.CertInfo, false, idxPath); len(certInfoErrors) > 0 { + validationResults.AddErrors(certInfoErrors...) + } else if cert, err := tls.LoadX509KeyPair(namedCertificate.CertFile, namedCertificate.KeyFile); err != nil { + validationResults.AddErrors(field.Invalid(idxPath.Child("certInfo"), namedCertificate.CertInfo, fmt.Sprintf("error loading certificate/key: %v", err))) + } else { + leaf, _ := x509.ParseCertificate(cert.Certificate[0]) + certDNSNames = append(certDNSNames, leaf.Subject.CommonName) + certDNSNames = append(certDNSNames, leaf.DNSNames...) 
+ } + + if len(namedCertificate.Names) == 0 { + validationResults.AddErrors(field.Required(idxPath.Child("names"), "")) + } + for j, name := range namedCertificate.Names { + jdxPath := idxPath.Child("names").Index(j) + if len(name) == 0 { + validationResults.AddErrors(field.Required(jdxPath, "")) + continue + } + + if takenNames.Has(name) { + validationResults.AddErrors(field.Invalid(jdxPath, name, "this name is already used in another named certificate")) + continue + } + + // validate names as domain names or *.*.foo.com domain names + validDNSName := true + for _, s := range strings.Split(name, ".") { + if s != "*" && len(utilvalidation.IsDNS1123Label(s)) != 0 { + validDNSName = false + } + } + if !validDNSName { + validationResults.AddErrors(field.Invalid(jdxPath, name, "must be a valid DNS name")) + continue + } + + takenNames.Insert(name) + + // validate certificate has common name or subject alt names that match + if len(certDNSNames) > 0 { + foundMatch := false + for _, dnsName := range certDNSNames { + if HostnameMatches(dnsName, name) { + foundMatch = true + break + } + // if the cert has a wildcard dnsName, and we've configured a non-wildcard name, see if our specified name will match against the dnsName. + if strings.HasPrefix(dnsName, "*.") && !strings.HasPrefix(name, "*.") && HostnameMatches(name, dnsName) { + foundMatch = true + break + } + } + if !foundMatch { + validationResults.AddWarnings(field.Invalid(jdxPath, name, "the specified certificate does not have a CommonName or DNS subjectAltName that matches this name")) + } + } + } + } + + return validationResults +} + +func ValidateCertInfo(certInfo configv1.CertInfo, required bool, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if required { + if len(certInfo.CertFile) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("certFile"), "The certificate file must be provided")) + } + if len(certInfo.KeyFile) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("keyFile"), "The certificate key must be provided")) + } + } + + if (len(certInfo.CertFile) == 0) != (len(certInfo.KeyFile) == 0) { + allErrs = append(allErrs, field.Required(fldPath.Child("certFile"), "Both the certificate file and the certificate key must be provided together or not at all")) + allErrs = append(allErrs, field.Required(fldPath.Child("keyFile"), "Both the certificate file and the certificate key must be provided together or not at all")) + } + + if len(certInfo.CertFile) > 0 { + allErrs = append(allErrs, ValidateFile(certInfo.CertFile, fldPath.Child("certFile"))...) + } + + if len(certInfo.KeyFile) > 0 { + allErrs = append(allErrs, ValidateFile(certInfo.KeyFile, fldPath.Child("keyFile"))...) + } + + // validate certfile/keyfile load/parse? 
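Driving the validation above from config-loading code looks roughly like the sketch below; warnings (for example, a named certificate that does not cover one of its configured names) stay advisory while errors should fail startup:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/validation"
)

func main() {
	info := configv1.HTTPServingInfo{}
	info.BindAddress = "0.0.0.0:8443"
	info.BindNetwork = "tcp"
	info.MinTLSVersion = "VersionTLS12"

	results := validation.ValidateHTTPServingInfo(info, field.NewPath("servingInfo"))
	for _, w := range results.Warnings {
		fmt.Println("warning:", w)
	}
	for _, e := range results.Errors {
		fmt.Println("error:", e) // e.g. missing certFile/keyFile, since certs are required here
	}
}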
+ + return allErrs +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go new file mode 100644 index 00000000000..469c0fe4de7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go @@ -0,0 +1,268 @@ +package controllercmd + +import ( + "context" + "fmt" + "io/ioutil" + "sync" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/apiserver/pkg/server/healthz" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + + configv1 "github.com/openshift/api/config/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/config/configdefaults" + leaderelectionconverter "github.com/openshift/library-go/pkg/config/leaderelection" + "github.com/openshift/library-go/pkg/config/serving" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +// StartFunc is the function to call on leader election start +type StartFunc func(*ControllerContext) error + +type ControllerContext struct { + ComponentConfig *unstructured.Unstructured + + // KubeConfig provides the REST config with no content type (it will default to JSON). + // Use this config for CR resources. + KubeConfig *rest.Config + + // ProtoKubeConfig provides the REST config with "application/vnd.kubernetes.protobuf,application/json" content type. + // Note that this config might not be safe for CR resources, instead it should be used for other resources. + ProtoKubeConfig *rest.Config + + // EventRecorder is used to record events in controllers. + EventRecorder events.Recorder + + // Server is the GenericAPIServer serving healthz checks and debug info + Server *genericapiserver.GenericAPIServer + + Ctx context.Context +} + +// defaultObserverInterval specifies the default interval that file observer will do rehash the files it watches and react to any changes +// in those files. +var defaultObserverInterval = 5 * time.Second + +// ControllerBuilder allows the construction of an controller in optional pieces. +type ControllerBuilder struct { + kubeAPIServerConfigFile *string + clientOverrides *client.ClientConnectionOverrides + leaderElection *configv1.LeaderElection + fileObserver fileobserver.Observer + fileObserverReactorFn func(file string, action fileobserver.ActionType) error + + startFunc StartFunc + componentName string + componentNamespace string + instanceIdentity string + observerInterval time.Duration + + servingInfo *configv1.HTTPServingInfo + authenticationConfig *operatorv1alpha1.DelegatedAuthentication + authorizationConfig *operatorv1alpha1.DelegatedAuthorization + healthChecks []healthz.HealthChecker +} + +// NewController returns a builder struct for constructing the command you want to run +func NewController(componentName string, startFunc StartFunc) *ControllerBuilder { + return &ControllerBuilder{ + startFunc: startFunc, + componentName: componentName, + observerInterval: defaultObserverInterval, + } +} + +// WithRestartOnChange will enable a file observer controller loop that observes changes into specified files. If a change to a file is detected, +// the specified channel will be closed (allowing to graceful shutdown for other channels). 
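Assembling the builder described above (WithServer, WithLeaderElection, and Run are defined further down); every literal below is a placeholder, and note that Run refuses to start without a server config:

package main

import (
	"context"

	configv1 "github.com/openshift/api/config/v1"
	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"

	"github.com/openshift/library-go/pkg/controller/controllercmd"
)

func main() {
	startFunc := func(cctx *controllercmd.ControllerContext) error {
		// Wire informers/controllers against cctx.KubeConfig here, then block.
		<-cctx.Ctx.Done()
		return nil
	}

	servingInfo := configv1.HTTPServingInfo{}
	servingInfo.BindAddress = "0.0.0.0:8443" // WithServer fills the remaining defaults

	builder := controllercmd.NewController("example-operator", startFunc).
		WithServer(servingInfo,
			operatorv1alpha1.DelegatedAuthentication{Disabled: true},
			operatorv1alpha1.DelegatedAuthorization{Disabled: true}).
		WithLeaderElection(configv1.LeaderElection{Disable: true}, "", "")

	// Run blocks for the life of the process; nil means no component config.
	if err := builder.Run(nil, context.Background()); err != nil {
		panic(err)
	}
}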
+func (b *ControllerBuilder) WithRestartOnChange(stopCh chan<- struct{}, startingFileContent map[string][]byte, files ...string) *ControllerBuilder { + if len(files) == 0 { + return b + } + if b.fileObserver == nil { + observer, err := fileobserver.NewObserver(b.observerInterval) + if err != nil { + panic(err) + } + b.fileObserver = observer + } + var once sync.Once + + b.fileObserverReactorFn = func(filename string, action fileobserver.ActionType) error { + once.Do(func() { + klog.Warning(fmt.Sprintf("Restart triggered because of %s", action.String(filename))) + close(stopCh) + }) + return nil + } + + b.fileObserver.AddReactor(b.fileObserverReactorFn, startingFileContent, files...) + return b +} + +func (b *ControllerBuilder) WithComponentNamespace(ns string) *ControllerBuilder { + b.componentNamespace = ns + return b +} + +// WithLeaderElection adds leader election options +func (b *ControllerBuilder) WithLeaderElection(leaderElection configv1.LeaderElection, defaultNamespace, defaultName string) *ControllerBuilder { + if leaderElection.Disable { + return b + } + + defaulted := leaderelectionconverter.LeaderElectionDefaulting(leaderElection, defaultNamespace, defaultName) + b.leaderElection = &defaulted + return b +} + +// WithServer adds a server that provides metrics and healthz +func (b *ControllerBuilder) WithServer(servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization) *ControllerBuilder { + b.servingInfo = servingInfo.DeepCopy() + configdefaults.SetRecommendedHTTPServingInfoDefaults(b.servingInfo) + b.authenticationConfig = &authenticationConfig + b.authorizationConfig = &authorizationConfig + return b +} + +// WithHealthChecks adds a list of healthchecks to the server +func (b *ControllerBuilder) WithHealthChecks(healthChecks ...healthz.HealthChecker) *ControllerBuilder { + b.healthChecks = append(b.healthChecks, healthChecks...) + return b +} + +// WithKubeConfigFile sets an optional kubeconfig file. inclusterconfig will be used if filename is empty +func (b *ControllerBuilder) WithKubeConfigFile(kubeConfigFilename string, defaults *client.ClientConnectionOverrides) *ControllerBuilder { + b.kubeAPIServerConfigFile = &kubeConfigFilename + b.clientOverrides = defaults + return b +} + +// WithInstanceIdentity sets the instance identity to use if you need something special. The default is just a UID which is +// usually fine for a pod. +func (b *ControllerBuilder) WithInstanceIdentity(identity string) *ControllerBuilder { + b.instanceIdentity = identity + return b +} + +// Run starts your controller for you. 
It uses leader election if you asked, otherwise it directly calls you +func (b *ControllerBuilder) Run(config *unstructured.Unstructured, ctx context.Context) error { + clientConfig, err := b.getClientConfig() + if err != nil { + return err + } + + if b.fileObserver != nil { + go b.fileObserver.Run(ctx.Done()) + } + + kubeClient := kubernetes.NewForConfigOrDie(clientConfig) + namespace, err := b.getComponentNamespace() + if err != nil { + klog.Warningf("unable to identify the current namespace for events: %v", err) + } + controllerRef, err := events.GetControllerReferenceForCurrentPod(kubeClient, namespace, nil) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + eventRecorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(namespace), b.componentName, controllerRef) + + // if there is file observer defined for this command, add event into default reaction function. + if b.fileObserverReactorFn != nil { + originalFileObserverReactorFn := b.fileObserverReactorFn + b.fileObserverReactorFn = func(file string, action fileobserver.ActionType) error { + eventRecorder.Warningf("OperatorRestart", "Restarted because of %s", action.String(file)) + return originalFileObserverReactorFn(file, action) + } + } + + if b.servingInfo == nil { + return fmt.Errorf("server config required for health checks and debugging endpoints") + } + + kubeConfig := "" + if b.kubeAPIServerConfigFile != nil { + kubeConfig = *b.kubeAPIServerConfigFile + } + serverConfig, err := serving.ToServerConfig(ctx, *b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) + if err != nil { + return err + } + serverConfig.HealthzChecks = append(serverConfig.HealthzChecks, b.healthChecks...) + + server, err := serverConfig.Complete(nil).New(b.componentName, genericapiserver.NewEmptyDelegate()) + if err != nil { + return err + } + + go func() { + if err := server.PrepareRun().Run(ctx.Done()); err != nil { + klog.Error(err) + } + klog.Fatal("server exited") + }() + + protoConfig := rest.CopyConfig(clientConfig) + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" + + controllerContext := &ControllerContext{ + ComponentConfig: config, + KubeConfig: clientConfig, + ProtoKubeConfig: protoConfig, + EventRecorder: eventRecorder, + Server: server, + Ctx: ctx, + } + + if b.leaderElection == nil { + if err := b.startFunc(controllerContext); err != nil { + return err + } + return fmt.Errorf("exited") + } + + leaderElection, err := leaderelectionconverter.ToConfigMapLeaderElection(clientConfig, *b.leaderElection, b.componentName, b.instanceIdentity) + if err != nil { + return err + } + + leaderElection.Callbacks.OnStartedLeading = func(ctx context.Context) { + controllerContext.Ctx = ctx + if err := b.startFunc(controllerContext); err != nil { + klog.Fatal(err) + } + } + leaderelection.RunOrDie(ctx, leaderElection) + return fmt.Errorf("exited") +} + +func (b *ControllerBuilder) getComponentNamespace() (string, error) { + if len(b.componentNamespace) > 0 { + return b.componentNamespace, nil + } + nsBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "openshift-config-managed", err + } + return string(nsBytes), nil +} + +func (b *ControllerBuilder) getClientConfig() (*rest.Config, error) { + kubeconfig := "" + if b.kubeAPIServerConfigFile != nil { + kubeconfig = *b.kubeAPIServerConfigFile + } + + return 
client.GetKubeConfigOrInClusterConfig(kubeconfig, b.clientOverrides) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go new file mode 100644 index 00000000000..2ce3bdef0d5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go @@ -0,0 +1,267 @@ +package controllercmd + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "time" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/server" + "k8s.io/component-base/logs" + "k8s.io/klog" + + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + + "github.com/openshift/library-go/pkg/config/configdefaults" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/serviceability" + + // for metrics + _ "github.com/openshift/library-go/pkg/controller/metrics" +) + +// ControllerCommandConfig holds values required to construct a command to run. +type ControllerCommandConfig struct { + componentName string + startFunc StartFunc + version version.Info + + basicFlags *ControllerFlags +} + +// NewControllerConfig returns a new ControllerCommandConfig which can be used to wire up all the boiler plate of a controller +// TODO add more methods around wiring health checks and the like +func NewControllerCommandConfig(componentName string, version version.Info, startFunc StartFunc) *ControllerCommandConfig { + return &ControllerCommandConfig{ + startFunc: startFunc, + componentName: componentName, + version: version, + + basicFlags: NewControllerFlags(), + } +} + +// NewCommand returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, +// leader election and other "normal" behaviors. +// Deprecated: Use the NewCommandWithContext instead, this is here to be less disturbing for existing usages. +func (c *ControllerCommandConfig) NewCommand() *cobra.Command { + return c.NewCommandWithContext(context.TODO()) + +} + +// NewCommandWithContext returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, +// leader election and other "normal" behaviors. +// The context passed will be passed down to controller loops and observers and cancelled on SIGTERM and SIGINT signals. +func (c *ControllerCommandConfig) NewCommandWithContext(ctx context.Context) *cobra.Command { + cmd := &cobra.Command{ + Run: func(cmd *cobra.Command, args []string) { + // boiler plate for the "normal" command + rand.Seed(time.Now().UTC().UnixNano()) + logs.InitLogs() + + // handle SIGTERM and SIGINT by cancelling the context. 
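In practice most operators sit one level higher and let ControllerCommandConfig generate the cobra command, which wires up --config, --kubeconfig, signal handling, and logging. A sketch with placeholder names:

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/version"

	"github.com/openshift/library-go/pkg/controller/controllercmd"
)

func runOperator(ctx *controllercmd.ControllerContext) error {
	// Start controllers against ctx.KubeConfig, then block until shutdown.
	<-ctx.Ctx.Done()
	return nil
}

func main() {
	cmd := controllercmd.NewControllerCommandConfig(
		"example-operator",
		version.Info{GitVersion: "v0.0.1"},
		runOperator,
	).NewCommandWithContext(context.Background())
	cmd.Use = "example-operator"
	cmd.Short = "Start the example operator"

	if err := cmd.Execute(); err != nil {
		panic(err)
	}
}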
+ shutdownCtx, cancel := context.WithCancel(ctx) + shutdownHandler := server.SetupSignalHandler() + go func() { + defer cancel() + <-shutdownHandler + klog.Infof("Received SIGTERM or SIGINT signal, shutting down controller.") + }() + + defer logs.FlushLogs() + defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), c.version)() + defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() + + serviceability.StartProfiler() + + if err := c.basicFlags.Validate(); err != nil { + klog.Fatal(err) + } + + ctx, terminate := context.WithCancel(shutdownCtx) + defer terminate() + + if len(c.basicFlags.TerminateOnFiles) > 0 { + // setup file observer to terminate when given files change + obs, err := fileobserver.NewObserver(10 * time.Second) + if err != nil { + klog.Fatal(err) + } + files := map[string][]byte{} + for _, fn := range c.basicFlags.TerminateOnFiles { + fileBytes, err := ioutil.ReadFile(fn) + if err != nil { + klog.Warningf("Unable to read initial content of %q: %v", fn, err) + continue // intentionally ignore errors + } + files[fn] = fileBytes + } + obs.AddReactor(func(filename string, action fileobserver.ActionType) error { + klog.Infof("exiting because %q changed", filename) + terminate() + return nil + }, files, c.basicFlags.TerminateOnFiles...) + + go obs.Run(shutdownHandler) + } + + if err := c.StartController(ctx); err != nil { + klog.Fatal(err) + } + }, + } + + c.basicFlags.AddFlags(cmd) + + return cmd +} + +// Config returns the configuration of this command. Use StartController if you don't need to customize the default operator. +// This method does not modify the receiver. +func (c *ControllerCommandConfig) Config() (*unstructured.Unstructured, *operatorv1alpha1.GenericOperatorConfig, []byte, error) { + configContent, unstructuredConfig, err := c.basicFlags.ToConfigObj() + if err != nil { + return nil, nil, nil, err + } + config := &operatorv1alpha1.GenericOperatorConfig{} + if unstructuredConfig != nil { + // make a copy we can mutate + configCopy := unstructuredConfig.DeepCopy() + // force the config to our version to read it + configCopy.SetGroupVersionKind(operatorv1alpha1.GroupVersion.WithKind("GenericOperatorConfig")) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configCopy.Object, config); err != nil { + return nil, nil, nil, err + } + } + return unstructuredConfig, config, configContent, nil +} + +func hasServiceServingCerts(certDir string) bool { + if _, err := os.Stat(filepath.Join(certDir, "tls.crt")); os.IsNotExist(err) { + return false + } + if _, err := os.Stat(filepath.Join(certDir, "tls.key")); os.IsNotExist(err) { + return false + } + return true +} + +// AddDefaultRotationToConfig starts the provided builder with the default rotation set (config + serving info). Use StartController if +// you do not need to customize the controller builder. This method modifies config with self-signed default cert locations if +// necessary. +func (c *ControllerCommandConfig) AddDefaultRotationToConfig(config *operatorv1alpha1.GenericOperatorConfig, configContent []byte) (map[string][]byte, []string, error) { + certDir := "/var/run/secrets/serving-cert" + + observedFiles := []string{ + c.basicFlags.ConfigFile, + // We observe these, so we they are created or modified by service serving cert signer, we can react and restart the process + // that will pick these up instead of generating the self-signed certs. + // NOTE: We are not observing the temporary, self-signed certificates. 
+ filepath.Join(certDir, "tls.crt"), + filepath.Join(certDir, "tls.key"), + } + // startingFileContent holds hardcoded starting content. If we generate our own certificates, then we want to specify empty + // content to avoid a starting race. When we consume them, the race is really about as good as we can do since we don't know + // what's actually been read. + startingFileContent := map[string][]byte{ + c.basicFlags.ConfigFile: configContent, + } + + // if we don't have any serving cert/key pairs specified and the defaults are not present, generate a self-signed set + // TODO maybe this should be optional? It's a little difficult to come up with a scenario where this is worse than nothing though. + if len(config.ServingInfo.CertFile) == 0 && len(config.ServingInfo.KeyFile) == 0 { + servingInfoCopy := config.ServingInfo.DeepCopy() + configdefaults.SetRecommendedHTTPServingInfoDefaults(servingInfoCopy) + + if hasServiceServingCerts(certDir) { + klog.Infof("Using service-serving-cert provided certificates") + config.ServingInfo.CertFile = filepath.Join(certDir, "tls.crt") + config.ServingInfo.KeyFile = filepath.Join(certDir, "tls.key") + } else { + klog.Warningf("Using insecure, self-signed certificates") + temporaryCertDir, err := ioutil.TempDir("", "serving-cert-") + if err != nil { + return nil, nil, err + } + signerName := fmt.Sprintf("%s-signer@%d", c.componentName, time.Now().Unix()) + ca, err := crypto.MakeSelfSignedCA( + filepath.Join(temporaryCertDir, "serving-signer.crt"), + filepath.Join(temporaryCertDir, "serving-signer.key"), + filepath.Join(temporaryCertDir, "serving-signer.serial"), + signerName, + 0, + ) + if err != nil { + return nil, nil, err + } + certDir = temporaryCertDir + + // force the values to be set to where we are writing the certs + config.ServingInfo.CertFile = filepath.Join(certDir, "tls.crt") + config.ServingInfo.KeyFile = filepath.Join(certDir, "tls.key") + // nothing can trust this, so we don't really care about hostnames + servingCert, err := ca.MakeServerCert(sets.NewString("localhost"), 30) + if err != nil { + return nil, nil, err + } + if err := servingCert.WriteCertConfigFile(config.ServingInfo.CertFile, config.ServingInfo.KeyFile); err != nil { + return nil, nil, err + } + crtContent := &bytes.Buffer{} + keyContent := &bytes.Buffer{} + if err := servingCert.WriteCertConfig(crtContent, keyContent); err != nil { + return nil, nil, err + } + + // If we generate our own certificates, then we want to specify empty content to avoid a starting race. This way, + // if any change comes in, we will properly restart + startingFileContent[filepath.Join(certDir, "tls.crt")] = crtContent.Bytes() + startingFileContent[filepath.Join(certDir, "tls.key")] = keyContent.Bytes() + } + } + return startingFileContent, observedFiles, nil +} + +// StartController runs the controller. This is the recommend entrypoint when you don't need +// to customize the builder. +func (c *ControllerCommandConfig) StartController(ctx context.Context) error { + unstructuredConfig, config, configContent, err := c.Config() + if err != nil { + return err + } + + startingFileContent, observedFiles, err := c.AddDefaultRotationToConfig(config, configContent) + if err != nil { + return err + } + + exitOnChangeReactorCh := make(chan struct{}) + ctx2, cancel := context.WithCancel(ctx) + go func() { + select { + case <-exitOnChangeReactorCh: + cancel() + case <-ctx.Done(): + cancel() + } + }() + + builder := NewController(c.componentName, c.startFunc). 
+ WithKubeConfigFile(c.basicFlags.KubeConfigFile, nil). + WithLeaderElection(config.LeaderElection, "", c.componentName+"-lock"). + WithServer(config.ServingInfo, config.Authentication, config.Authorization). + WithRestartOnChange(exitOnChangeReactorCh, startingFileContent, observedFiles...) + + return builder.Run(unstructuredConfig, ctx2) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go new file mode 100644 index 00000000000..fe33b4351c9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/flags.go @@ -0,0 +1,132 @@ +package controllercmd + +import ( + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/spf13/cobra" + + "github.com/openshift/library-go/pkg/config/client" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + kyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" +) + +// ControllerFlags provides the "normal" controller flags +type ControllerFlags struct { + // ConfigFile hold the configfile to load + ConfigFile string + // KubeConfigFile points to a kubeconfig file if you don't want to use the in cluster config + KubeConfigFile string + // TerminateOnFiles is a list of files. If any of these changes, the process terminates. + TerminateOnFiles []string +} + +// NewControllerFlags returns flags with default values set +func NewControllerFlags() *ControllerFlags { + return &ControllerFlags{} +} + +// Validate makes sure the required flags are specified and no illegal combinations are found +func (o *ControllerFlags) Validate() error { + // everything is optional currently + return nil +} + +// AddFlags register and binds the default flags +func (f *ControllerFlags) AddFlags(cmd *cobra.Command) { + flags := cmd.Flags() + // This command only supports reading from config + flags.StringVar(&f.ConfigFile, "config", f.ConfigFile, "Location of the master configuration file to run from.") + cmd.MarkFlagFilename("config", "yaml", "yml") + flags.StringVar(&f.KubeConfigFile, "kubeconfig", f.KubeConfigFile, "Location of the master configuration file to run from.") + cmd.MarkFlagFilename("kubeconfig", "kubeconfig") + flags.StringArrayVar(&f.TerminateOnFiles, "terminate-on-files", f.TerminateOnFiles, "A list of files. If one of them changes, the process will terminate.") +} + +// ToConfigObj given completed flags, returns a config object for the flag that was specified. +// TODO versions goes away in 1.11 +func (f *ControllerFlags) ToConfigObj() ([]byte, *unstructured.Unstructured, error) { + // no file means empty, not err + if len(f.ConfigFile) == 0 { + return nil, nil, nil + } + + content, err := ioutil.ReadFile(f.ConfigFile) + if err != nil { + return nil, nil, err + } + // empty file means empty, not err + if len(content) == 0 { + return nil, nil, err + } + + data, err := kyaml.ToJSON(content) + if err != nil { + return nil, nil, err + } + uncastObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, data) + if err != nil { + return nil, nil, err + } + + return content, uncastObj.(*unstructured.Unstructured), nil +} + +// ToClientConfig given completed flags, returns a rest.Config. 
+
+// ToClientConfig, given completed flags, returns a rest.Config. Overrides are optional.
+func (f *ControllerFlags) ToClientConfig(overrides *client.ClientConnectionOverrides) (*rest.Config, error) {
+	return client.GetKubeConfigOrInClusterConfig(f.KubeConfigFile, overrides)
+}
+
+// ReadYAML decodes a runtime.Object from the provided scheme
+// TODO versions goes away with more complete scheme in 1.11
+func ReadYAML(data []byte, configScheme *runtime.Scheme, versions ...schema.GroupVersion) (runtime.Object, error) {
+	data, err := kyaml.ToJSON(data)
+	if err != nil {
+		return nil, err
+	}
+	configCodecFactory := serializer.NewCodecFactory(configScheme)
+	obj, err := runtime.Decode(configCodecFactory.UniversalDecoder(versions...), data)
+	if err != nil {
+		return nil, captureSurroundingJSONForError("error reading config: ", data, err)
+	}
+	return obj, err
+}
+
+// ReadYAMLFile reads a file and decodes a runtime.Object from the provided scheme
+func ReadYAMLFile(filename string, configScheme *runtime.Scheme, versions ...schema.GroupVersion) (runtime.Object, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	obj, err := ReadYAML(data, configScheme, versions...)
+	if err != nil {
+		return nil, fmt.Errorf("could not load config file %q due to an error: %v", filename, err)
+	}
+	return obj, err
+}
+
+// TODO: we ultimately want a better decoder for JSON that allows us exact line numbers and better
+// surrounding text description. This should be removed / replaced when that happens.
+func captureSurroundingJSONForError(prefix string, data []byte, err error) error {
+	if syntaxErr, ok := err.(*json.SyntaxError); err != nil && ok {
+		offset := syntaxErr.Offset
+		begin := offset - 20
+		if begin < 0 {
+			begin = 0
+		}
+		end := offset + 20
+		if end > int64(len(data)) {
+			end = int64(len(data))
+		}
+		return fmt.Errorf("%s%v (found near '%s')", prefix, err, string(data[begin:end]))
+	}
+	if err != nil {
+		return fmt.Errorf("%s%v", prefix, err)
+	}
+	return err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS
new file mode 100644
index 00000000000..bf630bd0714
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/OWNERS
@@ -0,0 +1,6 @@
+reviewers:
+  - deads2k
+  - sttts
+  - mfojtik
+approvers:
+  - mfojtik
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go
new file mode 100644
index 00000000000..c5a120574a0
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer.go
@@ -0,0 +1,66 @@
+package fileobserver
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"k8s.io/klog"
+)
+
+type Observer interface {
+	Run(stopChan <-chan struct{})
+	HasSynced() bool
+	AddReactor(reaction ReactorFn, startingFileContent map[string][]byte, files ...string) Observer
+}
+
+// ActionType defines the type of action observed on a file.
+type ActionType int
+
+const (
+	// FileModified means the file content was modified.
+	FileModified ActionType = iota
+
+	// FileCreated means the file was just created.
+	FileCreated
+
+	// FileDeleted means the file was deleted.
+	FileDeleted
+)
+
+// String returns a human-readable form of the action taken on a file.
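+// For example, FileModified.String("tls.crt") yields "file tls.crt was modified".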
+func (t ActionType) String(filename string) string {
+	switch t {
+	case FileCreated:
+		return fmt.Sprintf("file %s was created", filename)
+	case FileDeleted:
+		return fmt.Sprintf("file %s was deleted", filename)
+	case FileModified:
+		return fmt.Sprintf("file %s was modified", filename)
+	}
+	return ""
+}
+
+// ReactorFn defines a reaction function called when an observed file is modified.
+type ReactorFn func(file string, action ActionType) error
+
+// ExitOnChangeReactor provides a reactor function that causes the process to exit when a change is detected.
+// DEPRECATED: Using this function causes the process to exit immediately without a proper shutdown (context close/etc.).
+// Use TerminateOnChangeReactor() instead.
+var ExitOnChangeReactor = TerminateOnChangeReactor(func() { os.Exit(0) })
+
+func TerminateOnChangeReactor(terminateFn func()) ReactorFn {
+	return func(filename string, action ActionType) error {
+		klog.Infof("Triggering shutdown because %s", action.String(filename))
+		terminateFn()
+		return nil
+	}
+}
+
+func NewObserver(interval time.Duration) (Observer, error) {
+	return &pollingObserver{
+		interval: interval,
+		reactors: map[string][]ReactorFn{},
+		files:    map[string]fileHashAndState{},
+	}, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go
new file mode 100644
index 00000000000..0b33e79ef1a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling.go
@@ -0,0 +1,191 @@
+package fileobserver
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog"
+)
+
+type pollingObserver struct {
+	interval time.Duration
+	reactors map[string][]ReactorFn
+	files    map[string]fileHashAndState
+
+	reactorsMutex sync.RWMutex
+
+	syncedMutex sync.RWMutex
+	hasSynced   bool
+}
+
+// HasSynced indicates that the observer synced all observed files at least once.
+func (o *pollingObserver) HasSynced() bool {
+	o.syncedMutex.RLock()
+	defer o.syncedMutex.RUnlock()
+	return o.hasSynced
+}
+
+// AddReactor adds a new reactor to this observer.
+func (o *pollingObserver) AddReactor(reaction ReactorFn, startingFileContent map[string][]byte, files ...string) Observer {
+	o.reactorsMutex.Lock()
+	defer o.reactorsMutex.Unlock()
+	for _, f := range files {
+		if len(f) == 0 {
+			panic(fmt.Sprintf("observed file name must not be empty (%#v)", files))
+		}
+		// Do not rehash existing files
+		if _, exists := o.files[f]; exists {
+			continue
+		}
+		var err error
+
+		if startingContent, ok := startingFileContent[f]; ok {
+			klog.V(3).Infof("Starting from specified content for file %q", f)
+			// If empty starting content is specified, do not hash the empty string; report it the same
+			// way calculateFileHash() does in that case.
+			// If the file exists and is empty, we don't care about the initial content anyway, because we
+			// are only going to react when the file content changes.
+			// If the file does not exist but an empty string is specified as the initial content, without this
+			// the content would be hashed and the reaction would trigger as if the content had changed.
+			if len(startingContent) == 0 {
+				o.files[f] = fileHashAndState{exists: true}
+				o.reactors[f] = append(o.reactors[f], reaction)
+				continue
+			}
+			currentHash, emptyFile, err := calculateHash(bytes.NewBuffer(startingContent))
+			if err != nil {
+				panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err))
+			}
+			o.files[f] = fileHashAndState{exists: true, hash: currentHash, isEmpty: emptyFile}
+		} else {
+			klog.V(3).Infof("Adding reactor for file %q", f)
+			o.files[f], err = calculateFileHash(f)
+			if err != nil && !os.IsNotExist(err) {
+				panic(fmt.Sprintf("unexpected error while adding reactor for %#v: %v", files, err))
+			}
+		}
+		o.reactors[f] = append(o.reactors[f], reaction)
+	}
+	return o
+}
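+// A usage sketch (the path and starting bytes are illustrative): passing the
+// content just written to disk as startingFileContent means only a later,
+// genuine change will trigger the reaction:
+//
+//	obs, _ := NewObserver(5 * time.Second)
+//	stop := make(chan struct{})
+//	obs.AddReactor(
+//		TerminateOnChangeReactor(func() { close(stop) }),
+//		map[string][]byte{"/run/secrets/tls.crt": pemBytes},
+//		"/run/secrets/tls.crt")
+//	go obs.Run(stop)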
+
+func (o *pollingObserver) processReactors(stopCh <-chan struct{}) {
+	err := wait.PollImmediateInfinite(o.interval, func() (bool, error) {
+		select {
+		case <-stopCh:
+			return true, nil
+		default:
+		}
+		o.reactorsMutex.RLock()
+		defer o.reactorsMutex.RUnlock()
+		for filename, reactors := range o.reactors {
+			currentFileState, err := calculateFileHash(filename)
+			if err != nil && !os.IsNotExist(err) {
+				return false, err
+			}
+
+			lastKnownFileState := o.files[filename]
+			o.files[filename] = currentFileState
+
+			for i := range reactors {
+				var action ActionType
+				switch {
+				case !lastKnownFileState.exists && !currentFileState.exists:
+					// skip non-existing file
+					continue
+				case !lastKnownFileState.exists && currentFileState.exists && (len(currentFileState.hash) > 0 || currentFileState.isEmpty):
+					// if we see a new file created that has content or is empty, trigger the FileCreated action
+					klog.Infof("Observed file %q has been created (hash=%q)", filename, currentFileState.hash)
+					action = FileCreated
+				case lastKnownFileState.exists && !currentFileState.exists:
+					klog.Infof("Observed file %q has been deleted", filename)
+					action = FileDeleted
+				case lastKnownFileState.hash == currentFileState.hash:
+					// skip if the hashes are the same
+					continue
+				case lastKnownFileState.hash != currentFileState.hash:
+					klog.Infof("Observed file %q has been modified (old=%q, new=%q)", filename, lastKnownFileState.hash, currentFileState.hash)
+					action = FileModified
+				}
+				if err := reactors[i](filename, action); err != nil {
+					klog.Errorf("Reactor for %q failed: %v", filename, err)
+				}
+			}
+		}
+		if !o.HasSynced() {
+			o.syncedMutex.Lock()
+			o.hasSynced = true
+			o.syncedMutex.Unlock()
+			klog.V(3).Info("File observer successfully synced")
+		}
+		return false, nil
+	})
+	if err != nil {
+		klog.Fatalf("file observer failed: %v", err)
+	}
+}
+
+// Run starts the observer and blocks until stopChan is closed.
+func (o *pollingObserver) Run(stopChan <-chan struct{}) { + klog.Info("Starting file observer") + defer klog.Infof("Shutting down file observer") + o.processReactors(stopChan) +} + +type fileHashAndState struct { + hash string + exists bool + isEmpty bool +} + +func calculateFileHash(path string) (fileHashAndState, error) { + result := fileHashAndState{} + stat, err := os.Stat(path) + if err != nil { + return result, err + } + + // this is fatal + if stat.IsDir() { + return result, fmt.Errorf("you can watch only files, %s is a directory", path) + } + + f, err := os.Open(path) + if err != nil { + return result, err + } + defer f.Close() + + // at this point we know for sure the file exists and we can read its content even if that content is empty + result.exists = true + + hash, empty, err := calculateHash(f) + if err != nil { + return result, err + } + + result.hash = hash + result.isEmpty = empty + + return result, nil +} + +func calculateHash(content io.Reader) (string, bool, error) { + hasher := sha256.New() + written, err := io.Copy(hasher, content) + if err != nil { + return "", false, err + } + // written == 0 means the content is empty + if written == 0 { + return "", true, nil + } + return hex.EncodeToString(hasher.Sum(nil)), false, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go new file mode 100644 index 00000000000..32af864335b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/fileobserver/observer_polling_test.go @@ -0,0 +1,440 @@ +package fileobserver + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/wait" +) + +func TestObserverPolling(t *testing.T) { + type observedAction struct { + file string + action ActionType + } + + var ( + nonEmptyContent = []byte("non-empty") + changedContent = []byte("change") + emptyContent = []byte("") + + observedSingleFileCreated = func(actions []observedAction, t *testing.T) { + if len(actions) == 0 { + t.Errorf("no actions observed, but expected to observe created") + return + } + if actions[0].action != FileCreated { + t.Errorf("created action expected, but observed %q", actions[0].action.String(path.Base(actions[0].file))) + } + } + + observedSingleFileModified = func(actions []observedAction, t *testing.T) { + if len(actions) == 0 { + t.Errorf("no actions observed, but expected to observe modified") + return + } + if actions[0].action != FileModified { + t.Errorf("modified action expected, but observed %q", actions[0].action.String(path.Base(actions[0].file))) + } + } + + observedSingleFileDeleted = func(actions []observedAction, t *testing.T) { + if len(actions) == 0 { + t.Errorf("no actions observed, but expected to observe deleted") + return + } + if actions[0].action != FileDeleted { + t.Errorf("deleted action expected, but observed %q", actions[0].action.String(path.Base(actions[0].file))) + } + } + + observedNoChanges = func(actions []observedAction, t *testing.T) { + if len(actions) != 0 { + var result []string + for _, a := range actions { + result = append(result, a.action.String(path.Base(a.file))) + } + t.Errorf("expected to not observe any actions, but observed: %s", strings.Join(result, ",")) + } + } + + defaultTimeout = 5 * time.Second + ) + + tests := []struct { + name string + startFileContent []byte // the content the file is created with initially + 
changeFileContent []byte // change the file content + deleteFile bool // change the file by deleting it + startWithNoFile bool // start test with no file + setInitialContent bool // set the initial content + initialContent map[string][]byte // initial content to pass to observer + timeout time.Duration // maximum test duration (default: 5s) + waitForObserver time.Duration // duration to wait for observer to sync changes (default: 300ms) + + evaluateActions func([]observedAction, *testing.T) // func to evaluate observed actions + }{ + { + name: "start with existing non-empty file with no change and initial content set", + evaluateActions: observedNoChanges, + setInitialContent: true, + startFileContent: nonEmptyContent, + timeout: 1 * time.Second, + }, + { + name: "start with existing non-empty file with no change and no initial content set", + evaluateActions: observedNoChanges, + startFileContent: nonEmptyContent, + timeout: 1 * time.Second, + }, + { + name: "start with existing non-empty file that change", + evaluateActions: observedSingleFileModified, + setInitialContent: true, + startFileContent: nonEmptyContent, + changeFileContent: changedContent, + }, + { + name: "start with existing non-empty file and no initial content that change", + evaluateActions: observedSingleFileModified, + startFileContent: nonEmptyContent, + changeFileContent: changedContent, + }, + { + name: "start with existing empty file with no change", + evaluateActions: observedNoChanges, + setInitialContent: true, + startFileContent: emptyContent, + changeFileContent: emptyContent, + }, + { + name: "start with existing empty file and no initial content with no change", + evaluateActions: observedNoChanges, + startFileContent: emptyContent, + changeFileContent: emptyContent, + }, + { + name: "start with existing empty file that change content", + evaluateActions: observedSingleFileModified, + startFileContent: emptyContent, + changeFileContent: changedContent, + }, + { + name: "start with existing empty file and empty initial content that change content", + evaluateActions: observedSingleFileModified, + setInitialContent: true, + startFileContent: emptyContent, + changeFileContent: changedContent, + }, + { + name: "start with non-existing file with no change", + evaluateActions: observedNoChanges, + startWithNoFile: true, + }, + { + name: "start with non-existing file that is created as empty file", + evaluateActions: observedSingleFileCreated, + startWithNoFile: true, + changeFileContent: emptyContent, + }, + { + name: "start with non-existing file that is created as non-empty file", + evaluateActions: observedSingleFileCreated, + startWithNoFile: true, + changeFileContent: nonEmptyContent, + }, + { + name: "start with existing file with content that is deleted", + evaluateActions: observedSingleFileDeleted, + setInitialContent: true, + startFileContent: nonEmptyContent, + deleteFile: true, + }, + { + name: "start with existing file with content and not initial content set that is deleted", + evaluateActions: observedSingleFileDeleted, + startFileContent: nonEmptyContent, + deleteFile: true, + }, + } + + baseDir, err := ioutil.TempDir("", "observer-poll-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(baseDir) + + for _, test := range tests { + if test.timeout == 0 { + test.timeout = defaultTimeout + } + t.Run(test.name, func(t *testing.T) { + observer, err := NewObserver(200 * time.Millisecond) + if err != nil { + t.Fatal(err) + } + + testDir := filepath.Join(baseDir, t.Name()) + if err := 
os.MkdirAll(filepath.Join(baseDir, t.Name()), 0777); err != nil { + t.Fatal(err) + } + + testFile := filepath.Join(testDir, "testfile") + + if test.setInitialContent { + test.initialContent = map[string][]byte{ + testFile: test.startFileContent, + } + } + + if !test.startWithNoFile { + if err := ioutil.WriteFile(testFile, test.startFileContent, os.ModePerm); err != nil { + t.Fatal(err) + } + t.Logf("created file %q with content: %q", testFile, string(test.startFileContent)) + } + + observedChan := make(chan observedAction) + observer.AddReactor(func(file string, action ActionType) error { + t.Logf("observed %q", action.String(path.Base(file))) + observedChan <- observedAction{ + file: file, + action: action, + } + return nil + }, test.initialContent, testFile) + + stopChan := make(chan struct{}) + + // start observing actions + observedActions := []observedAction{} + var observedActionsMutex sync.Mutex + stopObservingChan := make(chan struct{}) + go func() { + for { + select { + case action := <-observedChan: + observedActionsMutex.Lock() + observedActions = append(observedActions, action) + observedActionsMutex.Unlock() + case <-stopObservingChan: + return + } + } + }() + + // start file observer + go observer.Run(stopChan) + + // wait until file observer see the files at least once + if err := wait.PollImmediate(10*time.Millisecond, test.timeout, func() (done bool, err error) { + return observer.HasSynced(), nil + }); err != nil { + t.Errorf("failed to wait for observer to sync: %v", err) + } + t.Logf("starting observing changes ...") + + if test.changeFileContent != nil { + t.Logf("writing %q ...", string(test.changeFileContent)) + if err := ioutil.WriteFile(testFile, test.changeFileContent, os.ModePerm); err != nil { + t.Fatal(err) + } + } + + if test.deleteFile { + if err := os.RemoveAll(testDir); err != nil { + t.Fatal(err) + } + } + + // give observer time to observe latest events + if test.waitForObserver == 0 { + time.Sleep(400 * time.Millisecond) + } else { + time.Sleep(test.waitForObserver) + } + + close(stopObservingChan) + close(stopChan) + + observedActionsMutex.Lock() + defer observedActionsMutex.Unlock() + test.evaluateActions(observedActions, t) // evaluate observed actions + }) + } +} + +type reactionRecorder struct { + reactions map[string][]ActionType + sync.RWMutex +} + +func newReactionRecorder() *reactionRecorder { + return &reactionRecorder{reactions: map[string][]ActionType{}} +} + +func (r *reactionRecorder) get(f string) []ActionType { + r.RLock() + defer r.RUnlock() + return r.reactions[f] +} + +func (r *reactionRecorder) add(f string, action ActionType) { + r.Lock() + defer r.Unlock() + r.reactions[f] = append(r.reactions[f], action) +} + +func TestObserverSimple(t *testing.T) { + dir, err := ioutil.TempDir("", "observer-simple-") + if err != nil { + t.Fatalf("tempdir: %v", err) + } + defer os.RemoveAll(dir) + + o, err := NewObserver(200 * time.Millisecond) + if err != nil { + t.Fatalf("observer: %v", err) + } + + reactions := newReactionRecorder() + + testReaction := func(f string, action ActionType) error { + reactions.add(f, action) + return nil + } + + testFile := filepath.Join(dir, "test-file-1") + + o.AddReactor(testReaction, nil, testFile) + + stopCh := make(chan struct{}) + defer close(stopCh) + go o.Run(stopCh) + + fileCreateObserved := make(chan struct{}) + go func() { + defer close(fileCreateObserved) + if err := wait.PollImmediateUntil(300*time.Millisecond, func() (bool, error) { + t.Logf("waiting for reaction ...") + if len(reactions.get(testFile)) 
== 0 { + return false, nil + } + if r := reactions.get(testFile)[0]; r != FileCreated { + return true, fmt.Errorf("expected FileCreated, got: %#v", reactions.get(testFile)) + } + t.Logf("recv: %#v", reactions.get(testFile)) + return true, nil + }, stopCh); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }() + + ioutil.WriteFile(testFile, []byte("foo"), os.ModePerm) + <-fileCreateObserved + + fileModifiedObserved := make(chan struct{}) + go func() { + defer close(fileModifiedObserved) + if err := wait.PollImmediateUntil(300*time.Millisecond, func() (bool, error) { + t.Logf("waiting for reaction ...") + if len(reactions.get(testFile)) != 2 { + return false, nil + } + + if r := reactions.get(testFile)[1]; r != FileModified { + return true, fmt.Errorf("expected FileModified, got: %#v", reactions.get(testFile)) + } + t.Logf("recv: %#v", reactions.get(testFile)) + return true, nil + }, stopCh); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }() + + ioutil.WriteFile(testFile, []byte("bar"), os.ModePerm) + <-fileModifiedObserved + + fileRemoveObserved := make(chan struct{}) + go func() { + defer close(fileRemoveObserved) + if err := wait.PollImmediateUntil(300*time.Millisecond, func() (bool, error) { + t.Logf("waiting for reaction ...") + if len(reactions.get(testFile)) != 3 { + return false, nil + } + if r := reactions.get(testFile)[2]; r != FileDeleted { + return true, fmt.Errorf("expected FileDeleted, got: %#v", reactions.get(testFile)) + } + t.Logf("recv: %#v", reactions.get(testFile)) + return true, nil + }, stopCh); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }() + + os.Remove(testFile) + <-fileRemoveObserved +} + +func TestObserverSimpleContentSpecified(t *testing.T) { + dir, err := ioutil.TempDir("", "observer-simple-") + if err != nil { + t.Fatalf("tempdir: %v", err) + } + defer os.RemoveAll(dir) + + o, err := NewObserver(200 * time.Millisecond) + if err != nil { + t.Fatalf("observer: %v", err) + } + + reactions := newReactionRecorder() + + testReaction := func(f string, action ActionType) error { + reactions.add(f, action) + return nil + } + + testFile := filepath.Join(dir, "test-file-1") + ioutil.WriteFile(testFile, []byte("foo"), os.ModePerm) + + o.AddReactor( + testReaction, + map[string][]byte{ + testFile: []byte("bar"), + }, + testFile) + + stopCh := make(chan struct{}) + defer close(stopCh) + go o.Run(stopCh) + + fileModifyObserved := make(chan struct{}) + go func() { + defer close(fileModifyObserved) + if err := wait.PollImmediateUntil(300*time.Millisecond, func() (bool, error) { + t.Logf("waiting for reaction ...") + if len(reactions.get(testFile)) == 0 { + return false, nil + } + if r := reactions.get(testFile)[0]; r != FileModified { + return true, fmt.Errorf("expected FileModified, got: %#v", reactions.get(testFile)) + } + t.Logf("recv: %#v", reactions.get(testFile)) + return true, nil + }, stopCh); err != nil { + t.Fatalf("unexpected error: %v", err) + } + }() + + <-fileModifyObserved + os.Remove(testFile) +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go new file mode 100644 index 00000000000..ba2892ec8fd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics.go @@ -0,0 +1,81 @@ +package metrics + +import ( + "net/url" + "time" + + "github.com/blang/semver" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + + 
"github.com/prometheus/client_golang/prometheus" +) + +var ( + // requestLatency is a Prometheus Summary metric type partitioned by + // "verb" and "url" labels. It is used for the rest client latency metrics. + requestLatency = k8smetrics.NewHistogramVec( + &k8smetrics.HistogramOpts{ + Name: "rest_client_request_latency_seconds", + Help: "Request latency in seconds. Broken down by verb and URL.", + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), + }, + []string{"verb", "url"}, + ) + + requestResult = k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ + Name: "rest_client_requests_total", + Help: "Number of HTTP requests, partitioned by status code, method, and host.", + }, + []string{"code", "method", "host"}, + ) +) + +func init() { + legacyregistry.MustRegister(requestLatency) + legacyregistry.MustRegister(requestResult) + + legacyregistry.Register(&latencyAdapter{requestLatency}) + legacyregistry.Register(&resultAdapter{requestResult}) +} + +type latencyAdapter struct { + m *k8smetrics.HistogramVec +} + +func (l *latencyAdapter) Describe(c chan<- *prometheus.Desc) { + l.m.Describe(c) +} + +func (l *latencyAdapter) Collect(c chan<- prometheus.Metric) { + l.m.Collect(c) +} + +func (l *latencyAdapter) Create(version *semver.Version) bool { + return l.m.Create(version) +} + +func (l *latencyAdapter) Observe(verb string, u url.URL, latency time.Duration) { + l.m.WithLabelValues(verb, u.String()).Observe(latency.Seconds()) +} + +type resultAdapter struct { + m *k8smetrics.CounterVec +} + +func (r *resultAdapter) Describe(c chan<- *prometheus.Desc) { + r.m.Describe(c) +} + +func (r *resultAdapter) Collect(c chan<- prometheus.Metric) { + r.m.Collect(c) +} + +func (r *resultAdapter) Create(version *semver.Version) bool { + return r.m.Create(version) +} + +func (r *resultAdapter) Increment(code, method, host string) { + r.m.WithLabelValues(code, method, host).Inc() +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics_test.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics_test.go new file mode 100644 index 00000000000..df93a01bdce --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/client_metrics_test.go @@ -0,0 +1,9 @@ +package metrics + +import ( + "testing" +) + +func TestNothing(t *testing.T) { + t.Log("success!") +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go new file mode 100644 index 00000000000..84afec388ad --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go @@ -0,0 +1,210 @@ +package metrics + +import ( + "k8s.io/client-go/util/workqueue" + k8smetrics "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog" + + "github.com/prometheus/client_golang/prometheus" +) + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +func init() { + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +// Metrics subsystem and keys used by the workqueue. 
+const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +func init() { + workqueue.SetProvider(prometheusMetricsProvider{}) +} + +type prometheusMetricsProvider struct{} + +func (prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { + depth := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(depth) + return depth +} + +func (prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + adds := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(adds) + return adds +} + +func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + latency := k8smetrics.NewHistogram(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested.", + ConstLabels: prometheus.Labels{"name": name}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + legacyregistry.Register(latency) + return latency +} + +func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + workDuration := k8smetrics.NewHistogram(&k8smetrics.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + ConstLabels: prometheus.Labels{"name": name}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + legacyregistry.Register(workDuration) + return workDuration +} + +func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + retries := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + legacyregistry.Register(retries) + return retries +} + +// TODO(danielqsj): Remove the following metrics, they are deprecated +func (prometheusMetricsProvider) NewDeprecatedDepthMetric(name string) workqueue.GaugeMetric { + depth := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: name, + Name: "depth", + Help: "(Deprecated) Current depth of workqueue: " + name, + }) + if err := legacyregistry.Register(depth); err != nil { + klog.Errorf("failed to register depth metric %v: %v", name, err) + } + return depth +} + +func (prometheusMetricsProvider) NewDeprecatedAddsMetric(name string) workqueue.CounterMetric { + adds := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: name, + Name: "adds", + Help: "(Deprecated) Total number of adds handled by workqueue: " + name, + }) + if err := legacyregistry.Register(adds); err != nil { + klog.Errorf("failed to register adds metric %v: %v", name, err) + } + return adds +} + +func (prometheusMetricsProvider) NewDeprecatedLatencyMetric(name string) workqueue.SummaryMetric { + latency := k8smetrics.NewSummary(&k8smetrics.SummaryOpts{ + Subsystem: name, + Name: "queue_latency", + Help: "(Deprecated) How long an item stays in workqueue" + name + " before being requested.", + }) + if err := legacyregistry.Register(latency); err != nil { + klog.Errorf("failed to register latency metric %v: %v", name, err) + } + return latency +} + +func (prometheusMetricsProvider) NewDeprecatedWorkDurationMetric(name string) workqueue.SummaryMetric { + workDuration := k8smetrics.NewSummary(&k8smetrics.SummaryOpts{ + Subsystem: name, + Name: "work_duration", + Help: "(Deprecated) How long processing an item from workqueue" + name + " takes.", + }) + if err := legacyregistry.Register(workDuration); err != nil { + klog.Errorf("failed to register work_duration metric %v: %v", name, err) + } + return workDuration +} + +func (prometheusMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: name, + Name: "unfinished_work_seconds", + Help: "(Deprecated) How many seconds of work " + name + " has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. 
One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + }) + if err := legacyregistry.Register(unfinished); err != nil { + klog.Errorf("failed to register unfinished_work_seconds metric %v: %v", name, err) + } + return unfinished +} + +func (prometheusMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := k8smetrics.NewGauge(&k8smetrics.GaugeOpts{ + Subsystem: name, + Name: "longest_running_processor_microseconds", + Help: "(Deprecated) How many microseconds has the longest running " + + "processor for " + name + " been running.", + }) + if err := legacyregistry.Register(unfinished); err != nil { + klog.Errorf("failed to register longest_running_processor_microseconds metric %v: %v", name, err) + } + return unfinished +} + +func (prometheusMetricsProvider) NewDeprecatedRetriesMetric(name string) workqueue.CounterMetric { + retries := k8smetrics.NewCounter(&k8smetrics.CounterOpts{ + Subsystem: name, + Name: "retries", + Help: "(Deprecated) Total number of retries handled by workqueue: " + name, + }) + if err := legacyregistry.Register(retries); err != nil { + klog.Errorf("failed to register retries metric %v: %v", name, err) + } + return retries +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/ownerref.go b/vendor/github.com/openshift/library-go/pkg/controller/ownerref.go new file mode 100644 index 00000000000..9c778934aa9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/ownerref.go @@ -0,0 +1,60 @@ +package controller + +import ( + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnsureOwnerRef adds the ownerref if needed. Removes ownerrefs with conflicting UIDs. +// Returns true if the input is mutated. +func EnsureOwnerRef(metadata metav1.Object, newOwnerRef metav1.OwnerReference) bool { + foundButNotEqual := false + for _, existingOwnerRef := range metadata.GetOwnerReferences() { + if existingOwnerRef.APIVersion == newOwnerRef.APIVersion && + existingOwnerRef.Kind == newOwnerRef.Kind && + existingOwnerRef.Name == newOwnerRef.Name { + + // if we're completely the same, there's nothing to do + if equality.Semantic.DeepEqual(existingOwnerRef, newOwnerRef) { + return false + } + + foundButNotEqual = true + break + } + } + + // if we weren't found, then we just need to add ourselves + if !foundButNotEqual { + metadata.SetOwnerReferences(append(metadata.GetOwnerReferences(), newOwnerRef)) + return true + } + + // if we need to remove an existing ownerRef, just do the easy thing and build it back from scratch + newOwnerRefs := []metav1.OwnerReference{newOwnerRef} + for i := range metadata.GetOwnerReferences() { + existingOwnerRef := metadata.GetOwnerReferences()[i] + if existingOwnerRef.APIVersion == newOwnerRef.APIVersion && + existingOwnerRef.Kind == newOwnerRef.Kind && + existingOwnerRef.Name == newOwnerRef.Name { + continue + } + newOwnerRefs = append(newOwnerRefs, existingOwnerRef) + } + metadata.SetOwnerReferences(newOwnerRefs) + return true +} + +// HasOwnerRef checks to see if an object has a particular owner. 
It is not opinionated about +// the bool fields +func HasOwnerRef(metadata metav1.Object, needle metav1.OwnerReference) bool { + for _, existingOwnerRef := range metadata.GetOwnerReferences() { + if existingOwnerRef.APIVersion == needle.APIVersion && + existingOwnerRef.Kind == needle.Kind && + existingOwnerRef.Name == needle.Name && + existingOwnerRef.UID == needle.UID { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/controller/ownerref_test.go b/vendor/github.com/openshift/library-go/pkg/controller/ownerref_test.go new file mode 100644 index 00000000000..0e984910fa1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/ownerref_test.go @@ -0,0 +1,221 @@ +package controller + +import ( + "testing" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +func TestEnsureOwnerRef(t *testing.T) { + tests := []struct { + name string + obj *metav1.ObjectMeta + newOwnerRef metav1.OwnerReference + expectedOwners []metav1.OwnerReference + expectedReturn bool + }{ + { + name: "empty", + obj: &metav1.ObjectMeta{}, + newOwnerRef: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expectedOwners: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + }, + expectedReturn: true, + }, + { + name: "add", + obj: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + }, + }, + newOwnerRef: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expectedOwners: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + }, + expectedReturn: true, + }, + { + name: "skip", + obj: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + }, + }, + newOwnerRef: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expectedOwners: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + }, + expectedReturn: false, + }, + { + name: "replace on uid", + obj: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("bad-uid")}, + }, + }, + newOwnerRef: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expectedOwners: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + }, + expectedReturn: true, + }, + { + name: "preserve controller", + obj: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + }, + }, + newOwnerRef: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", 
Name: "the-name", UID: types.UID("uid"), Controller: boolPtr(true), + }, + expectedOwners: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), Controller: boolPtr(true)}, + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + }, + expectedReturn: true, + }, + { + name: "preserve block", + obj: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid")}, + }, + }, + newOwnerRef: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), BlockOwnerDeletion: boolPtr(false), + }, + expectedOwners: []metav1.OwnerReference{ + {APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), BlockOwnerDeletion: boolPtr(false)}, + {APIVersion: "v1", Kind: "Foo", Name: "the-other", UID: types.UID("other-uid")}, + }, + expectedReturn: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actualReturn := EnsureOwnerRef(tc.obj, tc.newOwnerRef) + if tc.expectedReturn != actualReturn { + t.Errorf("expected %v, got %v", tc.expectedReturn, actualReturn) + return + } + if !equality.Semantic.DeepEqual(tc.expectedOwners, tc.obj.OwnerReferences) { + t.Errorf("expected %v, got %v", tc.expectedOwners, tc.obj.OwnerReferences) + return + } + }) + } +} + +func boolPtr(in bool) *bool { + return &in +} + +func TestHasOwnerRef(t *testing.T) { + tests := []struct { + name string + haystack *metav1.ObjectMeta + needle metav1.OwnerReference + expected bool + }{ + { + name: "empty", + haystack: &metav1.ObjectMeta{}, + needle: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expected: false, + }, + { + name: "exact", + haystack: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }}, + }, + needle: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expected: true, + }, + { + name: "not uid", + haystack: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", + }}, + }, + needle: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }, + expected: false, + }, + { + name: "ignored controller", + haystack: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }}, + }, + needle: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), Controller: boolPtr(true), + }, + expected: true, + }, + { + name: "ignored block", + haystack: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), + }}, + }, + needle: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), BlockOwnerDeletion: boolPtr(false), + }, + expected: true, + }, + { + name: "ignored both", + haystack: &metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), Controller: boolPtr(false), + }}, + }, + needle: metav1.OwnerReference{ + APIVersion: "v1", Kind: "Foo", Name: "the-name", UID: types.UID("uid"), BlockOwnerDeletion: 
boolPtr(false), + }, + expected: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + actual := HasOwnerRef(tc.haystack, tc.needle) + if tc.expected != actual { + t.Errorf("expected %v, got %v", tc.expected, actual) + return + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go new file mode 100644 index 00000000000..a79f3b264a6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -0,0 +1,1157 @@ +package crypto + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "math/big" + mathrand "math/rand" + "net" + "os" + "path/filepath" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/util/cert" +) + +// TLS versions that are known to golang. Go 1.13 adds support for +// TLS 1.3 that's opt-out with a build flag. +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLS versions that are enabled. +var supportedVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSVersionToNameOrDie given a tls version as an int, return its readable name +func TLSVersionToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range versions { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} +func TLSVersionOrDie(versionName string) uint16 { + version, err := TLSVersion(versionName) + if err != nil { + panic(err) + } + return version +} + +// TLS versions that are known to golang, but may not necessarily be enabled. +func GolangTLSVersions() []string { + supported := []string{} + for k := range versions { + supported = append(supported, k) + } + sort.Strings(supported) + return supported +} + +// Returns the build enabled TLS versions. 
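+// With the supportedVersions map above, this returns, sorted:
+// []string{"VersionTLS10", "VersionTLS11", "VersionTLS12", "VersionTLS13"}.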
+func ValidTLSVersions() []string { + validVersions := []string{} + for k := range supportedVersions { + validVersions = append(validVersions, k) + } + sort.Strings(validVersions) + return validVersions +} +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} + +// ciphersTLS13 copies golang 1.13 implementation, where TLS1.3 suites are not +// configurable (cipherSuites field is ignored for TLS1.3 flows and all of the +// below three - and none other - are used) +var ciphersTLS13 = map[string]uint16{ + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, +} + +var ciphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, +} + +// openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names +// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +var openSSLToIANACiphersMap = map[string]string{ + // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows + // "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + // "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + // "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + + // TLS 1.2 + "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B + "ECDHE-RSA-AES128-GCM-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2F + "ECDHE-ECDSA-AES256-GCM-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x2C + "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30 + 
"ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", // 0xCC,0xA9 + "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", // 0xCC,0xA8 + "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 + "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 + "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C + "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D + "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + + // TLS 1 + "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 + "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 + "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A + "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 + + // SSL 3 + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A +} + +// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names +func CipherSuitesToNamesOrDie(intVals []uint16) []string { + ret := []string{} + for _, intVal := range intVals { + ret = append(ret, CipherSuiteToNameOrDie(intVal)) + } + + return ret +} + +// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name +func CipherSuiteToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range ciphers { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func CipherSuite(cipherName string) (uint16, error) { + if cipher, ok := ciphers[cipherName]; ok { + return cipher, nil + } + + if _, ok := ciphersTLS13[cipherName]; ok { + return 0, fmt.Errorf("all golang TLSv1.3 ciphers are always used for TLSv1.3 flows") + } + + return 0, fmt.Errorf("unknown cipher name %q", cipherName) +} + +func CipherSuitesOrDie(cipherNames []string) []uint16 { + if len(cipherNames) == 0 { + return DefaultCiphers() + } + cipherValues := []uint16{} + for _, cipherName := range cipherNames { + cipher, err := CipherSuite(cipherName) + if err != nil { + panic(err) + } + cipherValues = append(cipherValues, cipher) + } + return cipherValues +} +func ValidCipherSuites() []string { + validCipherSuites := []string{} + for k := range ciphers { + validCipherSuites = append(validCipherSuites, k) + } + sort.Strings(validCipherSuites) + return validCipherSuites +} +func DefaultCiphers() []uint16 { + // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher + // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for + // perfect forward secrecy. Servers may provide additional cipher + // suites for backwards compatibility with HTTP/1.1 clients. + // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A + // (TLS 1.2 Cipher Suite Black List). 
+ return []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2 + // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index + // because it comes after ciphers forbidden by the http/2 spec + // tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + } +} + +// SecureTLSConfig enforces the default minimum security settings for the cluster. +func SecureTLSConfig(config *tls.Config) *tls.Config { + if config.MinVersion == 0 { + config.MinVersion = DefaultTLSVersion() + } + + config.PreferServerCipherSuites = true + if len(config.CipherSuites) == 0 { + config.CipherSuites = DefaultCiphers() + } + return config +} + +// OpenSSLToIANACipherSuites maps input OpenSSL Cipher Suite names to their +// IANA counterparts. +// Unknown ciphers are left out. 
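+// For example, []string{"ECDHE-RSA-AES128-GCM-SHA256", "UNKNOWN-CIPHER"} maps
+// to []string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"}.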
+func OpenSSLToIANACipherSuites(ciphers []string) []string { + ianaCiphers := make([]string, 0, len(ciphers)) + + for _, c := range ciphers { + ianaCipher, found := openSSLToIANACiphersMap[c] + if found { + ianaCiphers = append(ianaCiphers, ianaCipher) + } + } + + return ianaCiphers +} + +type TLSCertificateConfig struct { + Certs []*x509.Certificate + Key crypto.PrivateKey +} + +type TLSCARoots struct { + Roots []*x509.Certificate +} + +func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error { + // ensure parent dir + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return err + } + certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return err + } + keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + + if err := writeCertificates(certFileWriter, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFileWriter, c.Key); err != nil { + return err + } + + if err := certFileWriter.Close(); err != nil { + return err + } + if err := keyFileWriter.Close(); err != nil { + return err + } + + return nil +} + +func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error { + if err := writeCertificates(certFile, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFile, c.Key); err != nil { + return err + } + return nil +} + +func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) { + certBytes, err := EncodeCertificates(c.Certs...) + if err != nil { + return nil, nil, err + } + keyBytes, err := encodeKey(c.Key) + if err != nil { + return nil, nil, err + } + + return certBytes, keyBytes, nil +} + +func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) { + if len(certFile) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyFile) == 0 { + return nil, errors.New("keyFile missing") + } + + certPEMBlock, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + certs, err := cert.ParseCertsPEM(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %s", certFile, err) + } + + keyPEMBlock, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, err + } + keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) { + if len(certBytes) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyBytes) == 0 { + return nil, errors.New("keyFile missing") + } + + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + return nil, fmt.Errorf("Error reading cert: %s", err) + } + + keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +const ( + DefaultCertificateLifetimeInDays = 365 * 2 // 2 years + DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years + + // Default keys are 2048 bits + keyBits = 2048 +) + +type CA struct { + Config *TLSCertificateConfig + + SerialGenerator SerialGenerator +} + +// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe. 
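+// Two implementations follow: SerialFileGenerator hands out monotonically
+// increasing serials persisted to a file, while RandomSerialGenerator is
+// time-seeded and suited to throwaway CAs.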
+type SerialGenerator interface {
+	Next(template *x509.Certificate) (int64, error)
+}
+
+// SerialFileGenerator generates unique, monotonically increasing serial numbers and keeps the serial file on disk in sync.
+type SerialFileGenerator struct {
+	SerialFile string
+
+	// lock guards access to the Serial field
+	lock   sync.Mutex
+	Serial int64
+}
+
+func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) {
+	// read the serial file; it must already exist
+	serial, err := fileToSerial(serialFile)
+	if err != nil {
+		return nil, err
+	}
+
+	generator := &SerialFileGenerator{
+		Serial:     serial,
+		SerialFile: serialFile,
+	}
+
+	// 0 is unused and 1 is reserved for the CA itself
+	// Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+
+	// meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative)
+	if generator.Serial < 1 {
+		// fake a call to Next so the file stays in sync and Serial is incremented
+		if _, err := generator.Next(&x509.Certificate{}); err != nil {
+			return nil, err
+		}
+	}
+
+	return generator, nil
+}
+
+// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value.
+func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	// do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file
+	serial, err := fileToSerial(s.SerialFile)
+	if err != nil {
+		return 0, err
+	}
+	if serial != s.Serial {
+		return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial)
+	}
+
+	next := s.Serial + 1
+	s.Serial = next
+
+	// Output in hex, padded to multiples of two characters for OpenSSL's sake
+	serialText := fmt.Sprintf("%X", next)
+	if len(serialText)%2 == 1 {
+		serialText = "0" + serialText
+	}
+	// always add a newline at the end to have a valid file
+	serialText += "\n"
+
+	if err := ioutil.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil {
+		return 0, err
+	}
+	return next, nil
+}
+
+func fileToSerial(serialFile string) (int64, error) {
+	serialData, err := ioutil.ReadFile(serialFile)
+	if err != nil {
+		return 0, err
+	}
+
+	// read the file as a single hex number after stripping any whitespace
+	serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	if serial < 0 {
+		return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile)
+	}
+
+	return serial, nil
+}
+
+// RandomSerialGenerator returns a random serial from a time-seeded source; the certificate template is ignored
+type RandomSerialGenerator struct {
+}
+
+func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) {
+	r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano()))
+	return r.Int63(), nil
+}
+
+// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error.
+// If serialFile is empty, a RandomSerialGenerator will be used.
+func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) {
+	if ca, err := GetCA(certFile, keyFile, serialFile); err == nil {
+		return ca, false, err
+	}
+	ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays)
+	return ca, true, err
+}
+
+// If serialFile is empty, a RandomSerialGenerator will be used.
+func GetCA(certFile, keyFile, serialFile string) (*CA, error) {
+	caConfig, err := GetTLSCertificateConfig(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	var serialGenerator SerialGenerator
+	if len(serialFile) > 0 {
+		serialGenerator, err = NewSerialFileGenerator(serialFile)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		serialGenerator = &RandomSerialGenerator{}
+	}
+
+	return &CA{
+		SerialGenerator: serialGenerator,
+		Config:          caConfig,
+	}, nil
+}
+
+func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) {
+	caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return &CA{
+		SerialGenerator: &RandomSerialGenerator{},
+		Config:          caConfig,
+	}, nil
+}
+
+// If serialFile is empty, a RandomSerialGenerator will be used.
+func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) {
+	klog.V(2).Infof("Generating new CA for %s: cert in %s, key in %s", name, certFile, keyFile)
+
+	caConfig, err := MakeSelfSignedCAConfig(name, expireDays)
+	if err != nil {
+		return nil, err
+	}
+	if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil {
+		return nil, err
+	}
+
+	var serialGenerator SerialGenerator
+	if len(serialFile) > 0 {
+		// create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file)
+		if err := ioutil.WriteFile(serialFile, []byte("00\n"), 0644); err != nil {
+			return nil, err
+		}
+		serialGenerator, err = NewSerialFileGenerator(serialFile)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		serialGenerator = &RandomSerialGenerator{}
+	}
+
+	return &CA{
+		SerialGenerator: serialGenerator,
+		Config:          caConfig,
+	}, nil
+}
+
+func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) {
+	subject := pkix.Name{CommonName: name}
+	return MakeSelfSignedCAConfigForSubject(subject, expireDays)
+}
+
+func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) {
+	var caLifetimeInDays = DefaultCACertificateLifetimeInDays
+	if expireDays > 0 {
+		caLifetimeInDays = expireDays
+	}
+
+	if caLifetimeInDays > DefaultCACertificateLifetimeInDays {
+		warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays)
+	}
+
+	caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour
+	return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime)
+}
+
+func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) {
+	subject := pkix.Name{CommonName: name}
+	return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime)
+}
+
+func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, caLifetime time.Duration) (*TLSCertificateConfig, error) {
+	// Create CA cert
+	rootcaPublicKey, rootcaPrivateKey, publicKeyHash, err := newKeyPairWithHash()
+	if err != nil {
+		return nil, err
+	}
+	// AuthorityKeyId and SubjectKeyId should match for a self-signed CA
+	authorityKeyId := publicKeyHash
+	subjectKeyId := publicKeyHash
+	rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, time.Now, authorityKeyId, subjectKeyId)
+	rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey)
+	if err != nil {
+		return nil, err
+	}
+	caConfig := &TLSCertificateConfig{
+		Certs: []*x509.Certificate{rootcaCert},
+		Key:   rootcaPrivateKey,
+	}
+	return caConfig, nil
+}
+
+func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) {
+	// Create CA cert
+	signerPublicKey, signerPrivateKey, publicKeyHash, err := newKeyPairWithHash()
+	if err != nil {
+		return nil, err
+	}
+	authorityKeyId := issuer.Config.Certs[0].SubjectKeyId
+	subjectKeyId := publicKeyHash
+	signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now, authorityKeyId, subjectKeyId)
+	signerCert, err := issuer.signCertificate(signerTemplate, signerPublicKey)
+	if err != nil {
+		return nil, err
+	}
+	signerConfig := &TLSCertificateConfig{
+		Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...),
+		Key:   signerPrivateKey,
+	}
+	return signerConfig, nil
+}
+
+func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, bool, error) {
+	certConfig, err := GetServerCert(certFile, keyFile, hostnames)
+	if err != nil {
+		certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays)
+		return certConfig, true, err
+	}
+
+	return certConfig, false, nil
+}
+
+func GetServerCert(certFile, keyFile string, hostnames sets.String) (*TLSCertificateConfig, error) {
+	server, err := GetTLSCertificateConfig(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	cert := server.Certs[0]
+	ips, dns := IPAddressesDNSNames(hostnames.List())
+	missingIps := ipsNotInSlice(ips, cert.IPAddresses)
+	missingDns := stringsNotInSlice(dns, cert.DNSNames)
+	if len(missingIps) == 0 && len(missingDns) == 0 {
+		klog.V(4).Infof("Found existing server certificate in %s", certFile)
+		return server, nil
+	}
+
+	return nil, fmt.Errorf("existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v)", certFile, missingDns, missingIps)
+}
+
+func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, error) {
+	klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile)
+
+	server, err := ca.MakeServerCert(hostnames, expireDays)
+	if err != nil {
+		return nil, err
+	}
+	if err := server.WriteCertConfigFile(certFile, keyFile); err != nil {
+		return server, err
+	}
+	return server, nil
+}
+
+// CertificateExtensionFunc is passed a certificate that it may extend; it returns an error
+// if the extension attempt fails.
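+//
+// For example, a hypothetical extension that appends an extra DNS SAN before
+// the certificate is signed ("alt.example.com" is an illustrative value):
+//
+//	addAltName := func(cert *x509.Certificate) error {
+//		cert.DNSNames = append(cert.DNSNames, "alt.example.com")
+//		return nil
+//	}
+//	cfg, err := ca.MakeServerCert(sets.NewString("example.com"), 365, addAltName)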
+type CertificateExtensionFunc func(*x509.Certificate) error
+
+func (ca *CA) MakeServerCert(hostnames sets.String, expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) {
+	serverPublicKey, serverPrivateKey, publicKeyHash, err := newKeyPairWithHash()
+	if err != nil {
+		return nil, err
+	}
+	authorityKeyId := ca.Config.Certs[0].SubjectKeyId
+	subjectKeyId := publicKeyHash
+	serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), expireDays, time.Now, authorityKeyId, subjectKeyId)
+	for _, fn := range fns {
+		if err := fn(serverTemplate); err != nil {
+			return nil, err
+		}
+	}
+	serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey)
+	if err != nil {
+		return nil, err
+	}
+	server := &TLSCertificateConfig{
+		Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...),
+		Key:   serverPrivateKey,
+	}
+	return server, nil
+}
+
+func (ca *CA) MakeServerCertForDuration(hostnames sets.String, lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) {
+	serverPublicKey, serverPrivateKey, publicKeyHash, err := newKeyPairWithHash()
+	if err != nil {
+		return nil, err
+	}
+	authorityKeyId := ca.Config.Certs[0].SubjectKeyId
+	subjectKeyId := publicKeyHash
+	serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), lifetime, time.Now, authorityKeyId, subjectKeyId)
+	for _, fn := range fns {
+		if err := fn(serverTemplate); err != nil {
+			return nil, err
+		}
+	}
+	serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey)
+	if err != nil {
+		return nil, err
+	}
+	server := &TLSCertificateConfig{
+		Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...),
+		Key:   serverPrivateKey,
+	}
+	return server, nil
+}
+
+func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) {
+	certConfig, err := GetTLSCertificateConfig(certFile, keyFile)
+	if err != nil {
+		certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays)
+		return certConfig, true, err // true indicates we wrote the files.
+	}
+
+	return certConfig, false, nil
+}
+
+func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) {
+	klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile)
+	// ensure parent dirs
+	if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil {
+		return nil, err
+	}
+
+	clientPublicKey, clientPrivateKey, err := NewKeyPair()
+	if err != nil {
+		return nil, err
+	}
+	clientTemplate := newClientCertificateTemplate(userToSubject(u), expireDays, time.Now)
+	clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	certData, err := EncodeCertificates(clientCrt)
+	if err != nil {
+		return nil, err
+	}
+	keyData, err := encodeKey(clientPrivateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = ioutil.WriteFile(certFile, certData, os.FileMode(0644)); err != nil {
+		return nil, err
+	}
+	if err = ioutil.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil {
+		return nil, err
+	}
+
+	return GetTLSCertificateConfig(certFile, keyFile)
+}
+
+func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) {
+	clientPublicKey, clientPrivateKey, err := NewKeyPair()
+	if err != nil {
+		return nil, err
+	}
+	clientTemplate := newClientCertificateTemplateForDuration(userToSubject(u), lifetime, time.Now)
+	clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	certData, err := EncodeCertificates(clientCrt)
+	if err != nil {
+		return nil, err
+	}
+	keyData, err := encodeKey(clientPrivateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return GetTLSCertificateConfigFromBytes(certData, keyData)
+}
+
+type sortedForDER []string
+
+func (s sortedForDER) Len() int {
+	return len(s)
+}
+func (s sortedForDER) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s sortedForDER) Less(i, j int) bool {
+	l1 := len(s[i])
+	l2 := len(s[j])
+	if l1 == l2 {
+		return s[i] < s[j]
+	}
+	return l1 < l2
+}
+
+func userToSubject(u user.Info) pkix.Name {
+	// OK, we are going to order groups in a peculiar way here to work around
+	// two bugs: one in golang (https://github.com/golang/go/issues/24254),
+	// which incorrectly encodes Multivalued RDNs, and another in GNUTLS
+	// clients, which are too picky (https://gitlab.com/gnutls/gnutls/issues/403)
+	// and try to "correct" this issue when reading client certs.
+	//
+	// This workaround should be killed once Golang's pkix module is fixed to
+	// generate a correct DER encoding.
+	//
+	// The workaround relies on the fact that the first octet that differs
+	// between the encodings of two group RDNs will end up being the encoded
+	// length, which is directly related to the group name's length. So we'll
+	// sort such that the shortest names come first.
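+	// For instance, the groups ["system:masters", "dev", "admins"] would be
+	// emitted as ["dev", "admins", "system:masters"] (illustrative values):
+	// ordering by name length keeps the encoded RDN lengths non-decreasing,
+	// which the affected GNUTLS versions accept.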
+ ugroups := u.GetGroups() + groups := make([]string, len(ugroups)) + copy(groups, ugroups) + sort.Sort(sortedForDER(groups)) + + return pkix.Name{ + CommonName: u.GetName(), + SerialNumber: u.GetUID(), + Organization: groups, + } +} + +func (ca *CA) signCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) { + // Increment and persist serial + serial, err := ca.SerialGenerator.Next(template) + if err != nil { + return nil, err + } + template.SerialNumber = big.NewInt(serial) + return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key) +} + +func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) { + return newRSAKeyPair() +} + +func newKeyPairWithHash() (crypto.PublicKey, crypto.PrivateKey, []byte, error) { + publicKey, privateKey, err := newRSAKeyPair() + var publicKeyHash []byte + if err == nil { + hash := sha1.New() + hash.Write(publicKey.N.Bytes()) + publicKeyHash = hash.Sum(nil) + } + return publicKey, privateKey, publicKeyHash, err +} + +func newRSAKeyPair() (*rsa.PublicKey, *rsa.PrivateKey, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, keyBits) + if err != nil { + return nil, nil, err + } + return &privateKey.PublicKey, privateKey, nil +} + +// Can be used for CA or intermediate signing certs +func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(caLifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId) +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + template := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } + + template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts) + + return template +} + +func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) { + ips := []net.IP{} + dns := []string{} + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + ips = 
append(ips, ip)
+		} else {
+			dns = append(dns, host)
+		}
+	}
+
+	// Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and other unnamed libraries
+	// Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox
+	// See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766
+	for _, ip := range ips {
+		dns = append(dns, ip.String())
+	}
+
+	return ips, dns
+}
+
+func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) {
+	ok := false
+	certs := []*x509.Certificate{}
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			break
+		}
+		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := x509.ParseCertificate(block.Bytes)
+		if err != nil {
+			return certs, err
+		}
+
+		certs = append(certs, cert)
+		ok = true
+	}
+
+	if !ok {
+		return certs, errors.New("could not read any certificates")
+	}
+	return certs, nil
+}
+
+// Can be used as a certificate in http.Transport TLSClientConfig
+func newClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate {
+	var lifetimeInDays = DefaultCertificateLifetimeInDays
+	if expireDays > 0 {
+		lifetimeInDays = expireDays
+	}
+
+	if lifetimeInDays > DefaultCertificateLifetimeInDays {
+		warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays)
+	}
+
+	lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour
+
+	return newClientCertificateTemplateForDuration(subject, lifetime, currentTime)
+}
+
+// Can be used as a certificate in http.Transport TLSClientConfig
+func newClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate {
+	return &x509.Certificate{
+		Subject: subject,
+
+		SignatureAlgorithm: x509.SHA256WithRSA,
+
+		NotBefore:    currentTime().Add(-1 * time.Second),
+		NotAfter:     currentTime().Add(lifetime),
+		SerialNumber: big.NewInt(1),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+		BasicConstraintsValid: true,
+	}
+}
+
+func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) {
+	defaultLifetimeInYears := defaultLifetimeInDays / 365
+	fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears)
+	fmt.Fprintln(os.Stderr, "WARNING: For security reasons it is strongly recommended to change this period and make it smaller!")
+}
+
+func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) {
+	derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := x509.ParseCertificates(derBytes)
+	if err != nil {
+		return nil, err
+	}
+	if len(certs) != 1 {
+		return nil, errors.New("expected a single certificate")
+	}
+	return certs[0], nil
+}
+
+func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
+	b := bytes.Buffer{}
+	for _, cert := range certs {
+		if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
+			return []byte{}, err
+		}
+	}
+	return b.Bytes(), nil
+}
+func encodeKey(key crypto.PrivateKey) ([]byte, error) {
+	b := bytes.Buffer{}
+	switch key := key.(type) {
+	case *ecdsa.PrivateKey:
+		keyBytes, err := x509.MarshalECPrivateKey(key)
+		if err != nil {
+			return []byte{}, err
+		}
+		if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
+			return b.Bytes(), err
+		}
+	case *rsa.PrivateKey:
+		if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil {
+			return []byte{}, err
+		}
+	default:
+		return []byte{}, errors.New("unrecognized key type")
+
+	}
+	return b.Bytes(), nil
+}
+
+func writeCertificates(f io.Writer, certs ...*x509.Certificate) error {
+	bytes, err := EncodeCertificates(certs...)
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(bytes); err != nil {
+		return err
+	}
+
+	return nil
+}
+func writeKeyFile(f io.Writer, key crypto.PrivateKey) error {
+	bytes, err := encodeKey(key)
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(bytes); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func stringsNotInSlice(needles []string, haystack []string) []string {
+	missing := []string{}
+	for _, needle := range needles {
+		if !stringInSlice(needle, haystack) {
+			missing = append(missing, needle)
+		}
+	}
+	return missing
+}
+
+func stringInSlice(needle string, haystack []string) bool {
+	for _, straw := range haystack {
+		if needle == straw {
+			return true
+		}
+	}
+	return false
+}
+
+func ipsNotInSlice(needles []net.IP, haystack []net.IP) []net.IP {
+	missing := []net.IP{}
+	for _, needle := range needles {
+		if !ipInSlice(needle, haystack) {
+			missing = append(missing, needle)
+		}
+	}
+	return missing
+}
+
+func ipInSlice(needle net.IP, haystack []net.IP) bool {
+	for _, straw := range haystack {
+		if needle.Equal(straw) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go
new file mode 100644
index 00000000000..2576d5cc278
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go
@@ -0,0 +1,358 @@
+package crypto
+
+import (
+	"crypto"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"fmt"
+	"go/importer"
+	"strings"
+	"testing"
+	"time"
+)
+
+const certificateLifetime = 365 * 2
+
+func TestConstantMaps(t *testing.T) {
+	pkg, err := importer.Default().Import("crypto/tls")
+	if err != nil {
+		fmt.Printf("error: %s\n", err.Error())
+		return
+	}
+	discoveredVersions := map[string]bool{}
+	discoveredCiphers := map[string]bool{}
+	discoveredCiphersTLS13 := map[string]bool{}
+	for _, declName := range pkg.Scope().Names() {
+		if strings.HasPrefix(declName, "VersionTLS") {
+			discoveredVersions[declName] = true
+		}
+		if strings.HasPrefix(declName, "TLS_RSA_") || strings.HasPrefix(declName, "TLS_ECDHE_") {
+			discoveredCiphers[declName] = true
+		}
+		if strings.HasPrefix(declName, "TLS_AES_") || strings.HasPrefix(declName, "TLS_CHACHA20_") {
+			discoveredCiphersTLS13[declName] = true
+		}
+	}
+
+	for k := range discoveredCiphers {
+		if _, ok := ciphers[k]; !ok {
+			t.Errorf("discovered cipher tls.%s not in ciphers map", k)
+		}
+	}
+	for k := range ciphers {
+		if _, ok := discoveredCiphers[k]; !ok {
+			t.Errorf("ciphers map has %s not in tls package", k)
+		}
+	}
+
+	for k := range discoveredCiphersTLS13 {
+		if _, ok := ciphersTLS13[k]; !ok {
+			t.Errorf("discovered cipher tls.%s not in ciphersTLS13 map", k)
+		}
+	}
+	for k := range ciphersTLS13 {
+		if _, ok := discoveredCiphersTLS13[k]; !ok {
+			t.Errorf("ciphersTLS13 map has %s not in tls package", k)
+		}
+	}
+
+	for k := range discoveredVersions {
+		if _, ok := versions[k]; !ok {
+			
t.Errorf("discovered version tls.%s not in version map", k) + } + } + for k := range versions { + if _, ok := discoveredVersions[k]; !ok { + t.Errorf("versions map has %s not in tls package", k) + } + } + + for k := range supportedVersions { + if _, ok := discoveredVersions[k]; !ok { + t.Errorf("supported versions map has %s not in tls package", k) + } + } + +} + +func TestCrypto(t *testing.T) { + roots := x509.NewCertPool() + intermediates := x509.NewCertPool() + + // Test CA + fmt.Println("Building CA...") + caKey, caCrt := buildCA(t) + roots.AddCert(caCrt) + + // Test intermediate + fmt.Println("Building intermediate 1...") + intKey, intCrt := buildIntermediate(t, caKey, caCrt) + verify(t, intCrt, x509.VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + }, true, 2) + intermediates.AddCert(intCrt) + + // Test intermediate 2 + fmt.Println("Building intermediate 2...") + intKey2, intCrt2 := buildIntermediate(t, intKey, intCrt) + verify(t, intCrt2, x509.VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + }, true, 3) + intermediates.AddCert(intCrt2) + + // Test server cert + fmt.Println("Building server...") + _, serverCrt := buildServer(t, intKey2, intCrt2) + verify(t, serverCrt, x509.VerifyOptions{ + DNSName: "localhost", + Roots: roots, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, true, 4) + verify(t, serverCrt, x509.VerifyOptions{ + DNSName: "www.example.com", + Roots: roots, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, true, 4) + verify(t, serverCrt, x509.VerifyOptions{ + DNSName: "127.0.0.1", + Roots: roots, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, true, 4) + verify(t, serverCrt, x509.VerifyOptions{ + DNSName: "www.foo.com", + Roots: roots, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, false, 4) + + // Test client cert + fmt.Println("Building client...") + _, clientCrt := buildClient(t, intKey2, intCrt2) + verify(t, clientCrt, x509.VerifyOptions{ + Roots: roots, + Intermediates: intermediates, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + }, true, 4) +} + +// Can be used for CA or intermediate signing certs +func newSigningCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate { + var caLifetimeInDays = DefaultCACertificateLifetimeInDays + if expireDays > 0 { + caLifetimeInDays = expireDays + } + + if caLifetimeInDays > DefaultCACertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays) + } + + caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour + + return newSigningCertificateTemplateForDuration(subject, caLifetime, currentTime, nil, nil) +} + +func buildCA(t *testing.T) (crypto.PrivateKey, *x509.Certificate) { + caPublicKey, caPrivateKey, err := NewKeyPair() + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + caTemplate := newSigningCertificateTemplate(pkix.Name{CommonName: "CA"}, certificateLifetime, time.Now) + caCrt, err := signCertificate(caTemplate, caPublicKey, caTemplate, caPrivateKey) + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + return caPrivateKey, caCrt +} + +func buildIntermediate(t *testing.T, signingKey crypto.PrivateKey, signingCrt *x509.Certificate) (crypto.PrivateKey, *x509.Certificate) { + intermediatePublicKey, intermediatePrivateKey, err := NewKeyPair() + if err != 
nil { + t.Fatalf("Unexpected error: %#v", err) + } + intermediateTemplate := newSigningCertificateTemplate(pkix.Name{CommonName: "Intermediate"}, certificateLifetime, time.Now) + intermediateCrt, err := signCertificate(intermediateTemplate, intermediatePublicKey, signingCrt, signingKey) + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + if err := intermediateCrt.CheckSignatureFrom(signingCrt); err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + return intermediatePrivateKey, intermediateCrt +} + +func buildServer(t *testing.T, signingKey crypto.PrivateKey, signingCrt *x509.Certificate) (crypto.PrivateKey, *x509.Certificate) { + serverPublicKey, serverPrivateKey, err := NewKeyPair() + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + hosts := []string{"127.0.0.1", "localhost", "www.example.com"} + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: "Server"}, hosts, certificateLifetime, time.Now, nil, nil) + serverCrt, err := signCertificate(serverTemplate, serverPublicKey, signingCrt, signingKey) + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + if err := serverCrt.CheckSignatureFrom(signingCrt); err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + return serverPrivateKey, serverCrt +} + +func buildClient(t *testing.T, signingKey crypto.PrivateKey, signingCrt *x509.Certificate) (crypto.PrivateKey, *x509.Certificate) { + clientPublicKey, clientPrivateKey, err := NewKeyPair() + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + clientTemplate := newClientCertificateTemplate(pkix.Name{CommonName: "Client"}, certificateLifetime, time.Now) + clientCrt, err := signCertificate(clientTemplate, clientPublicKey, signingCrt, signingKey) + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + if err := clientCrt.CheckSignatureFrom(signingCrt); err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + return clientPrivateKey, clientCrt +} + +func verify(t *testing.T, cert *x509.Certificate, opts x509.VerifyOptions, success bool, chainLength int) { + validChains, err := cert.Verify(opts) + if success { + if err != nil { + t.Fatalf("Unexpected error: %#v", err) + } + if len(validChains) != 1 { + t.Fatalf("Expected a valid chain") + } + if len(validChains[0]) != chainLength { + t.Fatalf("Expected a valid chain of length %d, got %d", chainLength, len(validChains[0])) + } + } else if err == nil && len(validChains) > 0 { + t.Fatalf("Expected failure, got success") + } +} + +func TestRandomSerialGenerator(t *testing.T) { + generator := &RandomSerialGenerator{} + + hostnames := []string{"foo", "bar"} + template := newServerCertificateTemplate(pkix.Name{CommonName: hostnames[0]}, hostnames, certificateLifetime, time.Now, nil, nil) + if _, err := generator.Next(template); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestValidityPeriodOfClientCertificate(t *testing.T) { + currentTime := time.Now() + + currentFakeTime := func() time.Time { + return currentTime + } + + tests := []struct { + passedExpireDays int + realExpireDays int + }{ + { + passedExpireDays: 100, + realExpireDays: 100, + }, + { + passedExpireDays: 0, + realExpireDays: DefaultCertificateLifetimeInDays, + }, + { + passedExpireDays: -1, + realExpireDays: DefaultCertificateLifetimeInDays, + }, + } + + for _, test := range tests { + cert := newClientCertificateTemplate(pkix.Name{CommonName: "client"}, test.passedExpireDays, currentFakeTime) + expirationDate := cert.NotAfter + expectedExpirationDate := 
currentTime.Add(time.Duration(test.realExpireDays) * 24 * time.Hour)
+		if expectedExpirationDate != expirationDate {
+			t.Errorf("expected the client certificate to expire at %v, but found %v", expectedExpirationDate, expirationDate)
+		}
+	}
+}
+
+func TestValidityPeriodOfServerCertificate(t *testing.T) {
+	currentTime := time.Now()
+
+	currentFakeTime := func() time.Time {
+		return currentTime
+	}
+
+	tests := []struct {
+		passedExpireDays int
+		realExpireDays   int
+	}{
+		{
+			passedExpireDays: 100,
+			realExpireDays:   100,
+		},
+		{
+			passedExpireDays: 0,
+			realExpireDays:   DefaultCertificateLifetimeInDays,
+		},
+		{
+			passedExpireDays: -1,
+			realExpireDays:   DefaultCertificateLifetimeInDays,
+		},
+	}
+
+	for _, test := range tests {
+		cert := newServerCertificateTemplate(
+			pkix.Name{CommonName: "server"},
+			[]string{"www.example.com"},
+			test.passedExpireDays,
+			currentFakeTime,
+			nil,
+			nil,
+		)
+		expirationDate := cert.NotAfter
+		expectedExpirationDate := currentTime.Add(time.Duration(test.realExpireDays) * 24 * time.Hour)
+		if expectedExpirationDate != expirationDate {
+			t.Errorf("expected the server certificate to expire at %v, but found %v", expectedExpirationDate, expirationDate)
+		}
+	}
+}
+
+func TestValidityPeriodOfSigningCertificate(t *testing.T) {
+	currentTime := time.Now()
+
+	currentFakeTime := func() time.Time {
+		return currentTime
+	}
+
+	tests := []struct {
+		passedExpireDays int
+		realExpireDays   int
+	}{
+		{
+			passedExpireDays: 100,
+			realExpireDays:   100,
+		},
+		{
+			passedExpireDays: 0,
+			realExpireDays:   DefaultCACertificateLifetimeInDays,
+		},
+		{
+			passedExpireDays: -1,
+			realExpireDays:   DefaultCACertificateLifetimeInDays,
+		},
+	}
+
+	for _, test := range tests {
+		cert := newSigningCertificateTemplate(pkix.Name{CommonName: "CA"}, test.passedExpireDays, currentFakeTime)
+		expirationDate := cert.NotAfter
+		expectedExpirationDate := currentTime.Add(time.Duration(test.realExpireDays) * 24 * time.Hour)
+		if expectedExpirationDate != expirationDate {
+			t.Errorf("expected the CA certificate to expire at %v, but found %v", expectedExpirationDate, expirationDate)
+		}
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
new file mode 100644
index 00000000000..0aa127037c8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
@@ -0,0 +1,20 @@
+package crypto
+
+import (
+	"crypto/x509"
+	"time"
+)
+
+// FilterExpiredCerts checks whether all certificates in the bundle are valid, i.e. have
+// not expired, and returns a new bundle containing only the valid certificates.
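+//
+// A usage sketch (bundle is a hypothetical []*x509.Certificate):
+//
+//	valid := FilterExpiredCerts(bundle...)
+//	if len(valid) == 0 {
+//		// every certificate in the bundle has expired
+//	}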
+func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate {
+	currentTime := time.Now()
+	var validCerts []*x509.Certificate
+	for _, c := range certs {
+		if c.NotAfter.After(currentTime) {
+			validCerts = append(validCerts, c)
+		}
+	}
+
+	return validCerts
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go
new file mode 100644
index 00000000000..f0c854ef7f2
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation_test.go
@@ -0,0 +1,93 @@
+package crypto
+
+import (
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"io/ioutil"
+	"math/big"
+	"testing"
+	"time"
+
+	"k8s.io/client-go/util/cert"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestValidateCertificates(t *testing.T) {
+	c, err := newTestCACertificate(pkix.Name{CommonName: "test"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if len(c.Config.Certs) != 1 {
+		t.Fatalf("expected 1 certificate in the chain, but got %d", len(c.Config.Certs))
+	}
+
+	validCerts := FilterExpiredCerts(c.Config.Certs...)
+	if len(validCerts) != 1 {
+		t.Fatalf("expected 1 valid certificate in the chain, but got %d", len(validCerts))
+	}
+}
+
+func TestValidateCertificatesExpired(t *testing.T) {
+	certBytes, err := ioutil.ReadFile("./testfiles/tls-expired.crt")
+	if err != nil {
+		t.Fatal(err)
+	}
+	certs, err := cert.ParseCertsPEM(certBytes)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	newCert, err := newTestCACertificate(pkix.Name{CommonName: "etcdproxy-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now)
+	if err != nil {
+		t.Fatal(err)
+	}
+	certs = append(certs, newCert.Config.Certs...)
+
+	if len(certs) != 2 {
+		t.Fatalf("expected 2 certificates in the chain, but got %d", len(certs))
+	}
+
+	validCerts := FilterExpiredCerts(certs...)
+	if len(validCerts) != 1 {
+		t.Fatalf("expected 1 valid certificate in the chain, but got %d", len(validCerts))
+	}
+}
+
+// newTestCACertificate generates and signs a new CA certificate and key.
+func newTestCACertificate(subject pkix.Name, serialNumber int64, validity metav1.Duration, currentTime func() time.Time) (*CA, error) { + caPublicKey, caPrivateKey, err := NewKeyPair() + if err != nil { + return nil, err + } + + caCert := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(validity.Duration), + SerialNumber: big.NewInt(serialNumber), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + cert, err := signCertificate(caCert, caPublicKey, caCert, caPrivateKey) + if err != nil { + return nil, err + } + + return &CA{ + Config: &TLSCertificateConfig{ + Certs: []*x509.Certificate{cert}, + Key: caPrivateKey, + }, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt new file mode 100644 index 00000000000..b6140c7abb8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-expired.crt @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICMjCCAdmgAwIBAgIUdTpx2/qycBZJltbEdfTyfKyJjG0wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTIwODAwWhcN +MTgwNzMwMTIwOTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABMlJR5tWK7vgCytCxBQov1xNp+R9RG2wI1w9 +SXIn+Za97Nf6krdyUDd+P6QSSJDkRTQZDsGiCpJhgd5kAzFNUkajgZgwgZUwDgYD +VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw +HQYDVR0OBBYEFOERFpshmIXspqXoox9gnSFGmm3PMB8GA1UdIwQYMBaAFCtdC7xd +NJKjmyiwhZJH7LBLOLrgMCAGA1UdEQQZMBeCFWV0Y2Rwcm94eS10ZXN0cy5sb2Nh +bDAKBggqhkjOPQQDAgNHADBEAiAvsq9L5uk0jg3v2z1xemAUwPXrEIAcbJhXFfC0 +QmVGGgIgFT9d/inKJcm/NfAgDGkoXSvHGv0NKAZpR32Dqriobh4= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt new file mode 100644 index 00000000000..b321982a740 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls-multiple.crt @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUQ0hq1Lmd6ujao+8Iy6LfpMdyNI8wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMDAwWhcN +MjMwNzI5MTExMDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABGoowUY2eQdvaHG4S/UMYD6mjs6/P7mmhizl +KWO03gq2eVSsbiYAnCJok3o2WQ01GtcS6bOUJ1DOG0gLTRfQ/lWjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQmqCeN+suT +0JjgSxtCqTln7zonHjAfBgNVHSMEGDAWgBQmqCeN+suT0JjgSxtCqTln7zonHjAK +BggqhkjOPQQDAgNIADBFAiAUKV8vkiIoCiqtHQsp3PrUUV3He2B9K1tQgA8loTa+ +IQIhANPbCDVoPSFsX0I5iG/DQl/MmTo/tlsmNkN99j1j2JIM +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUU8ZsD37pcA1UYkgwhR6d/KjdGeAwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABLupsOF50q6GE7z2US77t5iLGe9wdOFwHssC 
+jUjCEGvJ/d2sGMxdiABJrrB8gau6TilrJCy9ZTYj56fzdReUnsKjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRhaKyklrnI +wd2kg84t1D8CvDVtdjAfBgNVHSMEGDAWgBRhaKyklrnIwd2kg84t1D8CvDVtdjAK +BggqhkjOPQQDAgNIADBFAiAOCYqtOamRapNc+XxR7IFzlr7Si7EvjQ+ej5SKHb7g +rgIhAIBd1dtMc0KJSFsoxnQZailkFi5Nlea2eHU1wEDKVb40 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/zCCAaagAwIBAgIUVCSMefpK8uxDKy87jKnwc97DseIwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABOhGVuxW0nEQ5REqQdRF1eJ7OUOdXB/oDJed +Jr1ezcyhJyCRvD9DfadSBvMHFyzw7ssBIIMm4C3Eufj96M3tSACjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBTLR9qOF3Hh +if8KUbkrRYUK13xSSDAfBgNVHSMEGDAWgBTLR9qOF3Hhif8KUbkrRYUK13xSSDAK +BggqhkjOPQQDAgNHADBEAiAFD2zRXnp40wVeffwpkU+ToFF6Nts/HJk02iMr/+km +RgIgRLZxonlkyLlUHucMKC2V+4UJ9akEbu/bhCXKuQb2DgY= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt new file mode 100644 index 00000000000..862bdbc2df0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.crt @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUWke4fSfaCH+2MLSFeTHBpoi+h1YwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTA1MDAwWhcN +MjMwNzI5MTA1MDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABHoqBfTXFdWRATfdrr/v5UriZBxmzL5aiwLZ +VRUg2UZNnoH2JLUcDkqx3IQakjoVijweiQeqxAai3mxjtgxbh+ajZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgDE3RpOiH +Gv7AEnYKRk46zVIkbzAfBgNVHSMEGDAWgBSgDE3RpOiHGv7AEnYKRk46zVIkbzAK +BggqhkjOPQQDAgNIADBFAiA3Gg/gwiEfjclpQYyd3qTgdCWzud8GKRdjVK3Z2BXW +swIhANMuxi0Y41mwcmh3a2icpdeGHGyGNdNDe8uF+5csuNUp +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key new file mode 100644 index 00000000000..83cf18be622 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/testfiles/tls.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIC+UyR59JEbt/qjWZG/87ZYzk0pOgTBmpx5R0w6uG66JoAoGCCqGSM49 +AwEHoUQDQgAEeioF9NcV1ZEBN92uv+/lSuJkHGbMvlqLAtlVFSDZRk2egfYktRwO +SrHchBqSOhWKPB6JB6rEBqLebGO2DFuH5g== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/openshift/library-go/pkg/git/OWNERS b/vendor/github.com/openshift/library-go/pkg/git/OWNERS new file mode 100644 index 00000000000..e3dd4519667 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/OWNERS @@ -0,0 +1,12 @@ +reviewers: + - smarterclayton + - csrwng + - bparees + - gabemontero + - mfojtik + - jim-minter +approvers: + - smarterclayton + - csrwng + - bparees + - mfojtik diff --git a/vendor/github.com/openshift/library-go/pkg/git/doc.go b/vendor/github.com/openshift/library-go/pkg/git/doc.go new file mode 100644 index 00000000000..e1a419ef715 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/doc.go @@ -0,0 +1,2 @@ +// Package git allows working with Git repositories +package git diff --git a/vendor/github.com/openshift/library-go/pkg/git/git.go 
b/vendor/github.com/openshift/library-go/pkg/git/git.go new file mode 100644 index 00000000000..1ac681b8c5e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/git.go @@ -0,0 +1,51 @@ +package git + +import ( + "bufio" + "io" + "net/url" + "path" + "strings" +) + +// NameFromRepositoryURL suggests a name for a repository URL based on the last +// segment of the path, or returns false +func NameFromRepositoryURL(url *url.URL) (string, bool) { + // from path + if len(url.Path) > 0 { + base := path.Base(url.Path) + if len(base) > 0 && base != "/" { + if ext := path.Ext(base); ext == ".git" { + base = base[:len(base)-4] + } + return base, true + } + } + return "", false +} + +type ChangedRef struct { + Ref string + Old string + New string +} + +func ParsePostReceive(r io.Reader) ([]ChangedRef, error) { + refs := []ChangedRef{} + scan := bufio.NewScanner(r) + for scan.Scan() { + segments := strings.Split(scan.Text(), " ") + if len(segments) != 3 { + continue + } + refs = append(refs, ChangedRef{ + Ref: segments[2], + Old: segments[0], + New: segments[1], + }) + } + if err := scan.Err(); err != nil && err != io.EOF { + return nil, err + } + return refs, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/git/repository.go b/vendor/github.com/openshift/library-go/pkg/git/repository.go new file mode 100644 index 00000000000..0f212359063 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/repository.go @@ -0,0 +1,552 @@ +package git + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "syscall" + "time" + + "k8s.io/klog" +) + +// Repository represents a git source repository +type Repository interface { + GetRootDir(dir string) (string, error) + GetOriginURL(dir string) (string, bool, error) + GetRef(dir string) string + Clone(dir string, url string) error + CloneWithOptions(dir string, url string, args ...string) error + CloneBare(dir string, url string) error + CloneMirror(dir string, url string) error + Fetch(dir string, url string, ref string) error + Checkout(dir string, ref string) error + PotentialPRRetryAsFetch(dir string, url string, ref string, err error) error + SubmoduleUpdate(dir string, init, recursive bool) error + Archive(dir, ref, format string, w io.Writer) error + Init(dir string, bare bool) error + Add(dir string, spec string) error + Commit(dir string, message string) error + AddRemote(dir string, name, url string) error + AddLocalConfig(dir, name, value string) error + ShowFormat(dir, commit, format string) (string, error) + ListRemote(url string, args ...string) (string, string, error) + TimedListRemote(timeout time.Duration, url string, args ...string) (string, string, error) + GetInfo(location string) (*SourceInfo, []error) +} + +const ( + // defaultCommandTimeout is the default timeout for git commands that we want to enforce timeouts on + defaultCommandTimeout = 30 * time.Second + + // Shallow maps to --depth=1, which clones a Git repository without + // downloading history + Shallow = "--depth=1" + + // noCommandTimeout signals that there should be no timeout for the command when passed as the timeout + // for the default timedExecGitFunc + noCommandTimeout = 0 * time.Second +) + +// ErrGitNotAvailable will be returned if the git call fails because a git binary +// could not be found +var ErrGitNotAvailable = errors.New("git binary not available") + +// SourceInfo stores information about the source code +type SourceInfo struct { + // Ref represents a 
commit SHA-1, valid Git branch name or a Git tag
+	// The output image will contain this information as 'io.openshift.build.commit.ref' label.
+	Ref string
+
+	// CommitID represents an arbitrary extended object reference in Git as SHA-1
+	// The output image will contain this information as 'io.openshift.build.commit.id' label.
+	CommitID string
+
+	// Date contains a date when the committer created the commit.
+	// The output image will contain this information as 'io.openshift.build.commit.date' label.
+	Date string
+
+	// AuthorName contains the name of the author
+	// The output image will contain this information (along with AuthorEmail) as 'io.openshift.build.commit.author' label.
+	AuthorName string
+
+	// AuthorEmail contains the e-mail of the author
+	// The output image will contain this information (along with AuthorName) as 'io.openshift.build.commit.author' label.
+	AuthorEmail string
+
+	// CommitterName contains the name of the committer
+	CommitterName string
+
+	// CommitterEmail contains the e-mail of the committer
+	CommitterEmail string
+
+	// Message represents the first 80 characters from the commit message.
+	// The output image will contain this information as 'io.openshift.build.commit.message' label.
+	Message string
+
+	// Location contains a valid URL to the original repository.
+	// The output image will contain this information as 'io.openshift.build.source-location' label.
+	Location string
+
+	// ContextDir contains path inside the Location directory that
+	// contains the application source code.
+	// The output image will contain this information as 'io.openshift.build.source-context-dir'
+	// label.
+	ContextDir string
+}
+
+// execGitFunc is a function that executes a Git command
+type execGitFunc func(dir string, args ...string) (string, string, error)
+
+// timedExecGitFunc is a function that executes a Git command with a timeout
+type timedExecGitFunc func(timeout time.Duration, dir string, args ...string) (string, string, error)
+
+type repository struct {
+	git      execGitFunc
+	timedGit timedExecGitFunc
+
+	shallow bool
+}
+
+// NewRepository creates a new Repository
+func NewRepository() Repository {
+	return NewRepositoryWithEnv(nil)
+}
+
+// NewRepositoryWithEnv creates a new Repository using the specified environment
+func NewRepositoryWithEnv(env []string) Repository {
+	return &repository{
+		git: func(dir string, args ...string) (string, string, error) {
+			return command("git", dir, env, args...)
+		},
+		timedGit: func(timeout time.Duration, dir string, args ...string) (string, string, error) {
+			return timedCommand(timeout, "git", dir, env, args...)
+		},
+	}
+}
+
+// NewRepositoryForBinary returns a Repository using the specified
+// git executable.
+func NewRepositoryForBinary(gitBinaryPath string) Repository {
+	return NewRepositoryForBinaryWithEnvironment(gitBinaryPath, nil)
+}
+
+// NewRepositoryForBinaryWithEnvironment returns a Repository using the specified
+// git executable and environment
+func NewRepositoryForBinaryWithEnvironment(gitBinaryPath string, env []string) Repository {
+	return &repository{
+		git: func(dir string, args ...string) (string, string, error) {
+			return command(gitBinaryPath, dir, env, args...)
+		},
+		timedGit: func(timeout time.Duration, dir string, args ...string) (string, string, error) {
+			return timedCommand(timeout, gitBinaryPath, dir, env, args...)
+		},
+	}
+}
+
+// IsBareRoot returns true if location is the root of a bare git repository
+func IsBareRoot(path string) (bool, error) {
+	_, err := os.Stat(filepath.Join(path, "HEAD"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// PotentialPRRetryAsFetch is used on checkout errors after a clone, when there is a
+// possibility that a fetch of a PR ref is needed between the clone and checkout operations.
+// Cases include: 1) GitHub PRs (example ref forms: (refs/)?pull/[1-9][0-9]*/head);
+// 2) refs which the RHEL7 git version appears to be too old to handle correctly
+// (example ref form: foo-bar-1), but which newer git versions seem to manage OK.
+func (r *repository) PotentialPRRetryAsFetch(dir, remote, ref string, err error) error {
+	klog.V(4).Infof("Checkout after clone failed for ref %s with error: %v, attempting fetch", ref, err)
+	err = r.Fetch(dir, remote, ref)
+	if err != nil {
+		return err
+	}
+
+	err = r.Checkout(dir, "FETCH_HEAD")
+	if err != nil {
+		return err
+	}
+	klog.V(4).Infof("Fetch / checkout for %s successful", ref)
+	return nil
+}
+
+// GetRootDir obtains the directory root for a Git repository
+func (r *repository) GetRootDir(location string) (string, error) {
+	dir, _, err := r.git(location, "rev-parse", "--git-dir")
+	if err != nil {
+		return "", err
+	}
+	if dir == "" {
+		return "", fmt.Errorf("%s is not a git repository", location)
+	}
+	if strings.HasSuffix(dir, ".git") {
+		dir = dir[:len(dir)-4]
+		if strings.HasSuffix(dir, "/") {
+			dir = dir[:len(dir)-1]
+		}
+	}
+	if len(dir) == 0 {
+		dir = location
+	}
+	return dir, nil
+}
+
+var (
+	remoteURLExtract  = regexp.MustCompile("^remote\\.(.*)\\.url (.*?)$")
+	remoteOriginNames = []string{"origin", "upstream", "github", "openshift", "heroku"}
+)
+
+// GetOriginURL returns the remote origin URL for the git repository
+func (r *repository) GetOriginURL(location string) (string, bool, error) {
+	text, _, err := r.git(location, "config", "--get-regexp", "^remote\\..*\\.url$")
+	if err != nil {
+		if IsExitCode(err, 1) {
+			return "", false, nil
+		}
+		return "", false, err
+	}
+
+	remotes := make(map[string]string)
+	s := bufio.NewScanner(bytes.NewBufferString(text))
+	for s.Scan() {
+		if matches := remoteURLExtract.FindStringSubmatch(s.Text()); matches != nil {
+			remotes[matches[1]] = matches[2]
+		}
+	}
+	if err := s.Err(); err != nil {
+		return "", false, err
+	}
+	for _, remote := range remoteOriginNames {
+		if url, ok := remotes[remote]; ok {
+			return url, true, nil
+		}
+	}
+
+	return "", false, nil
+}
+
+// GetRef retrieves the current branch reference for the git repository
+func (r *repository) GetRef(location string) string {
+	branch, _, err := r.git(location, "symbolic-ref", "-q", "--short", "HEAD")
+	if err != nil {
+		branch = ""
+	}
+	return branch
+}
+
+// AddRemote adds a new remote to the repository.
+func (r *repository) AddRemote(location, name, url string) error {
+	_, _, err := r.git(location, "remote", "add", name, url)
+	return err
+}
+
+// AddLocalConfig adds a value to the current repository
+func (r *repository) AddLocalConfig(location, name, value string) error {
+	_, _, err := r.git(location, "config", "--local", "--add", name, value)
+	return err
+}
+
+// CloneWithOptions clones a remote git repository to a local directory
+func (r *repository) CloneWithOptions(location string, url string, args ...string) error {
+	gitArgs := []string{"clone"}
+	gitArgs = append(gitArgs, args...)
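+	// e.g. CloneWithOptions(dir, url, Shallow) builds: git clone --depth=1 <url> <dir>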
+	gitArgs = append(gitArgs, url)
+	gitArgs = append(gitArgs, location)
+
+	// We need to check to see if we're importing reference information,
+	// for error checking later on
+	for _, opt := range gitArgs {
+		if opt == Shallow {
+			r.shallow = true
+			break
+		}
+	}
+
+	_, _, err := r.git("", gitArgs...)
+	return err
+}
+
+// Clone clones a remote git repository to a local directory
+func (r *repository) Clone(location string, url string) error {
+	return r.CloneWithOptions(location, url, "--recursive")
+}
+
+// CloneMirror clones a remote git repository to a local directory as a mirror
+func (r *repository) CloneMirror(location string, url string) error {
+	return r.CloneWithOptions(location, url, "--mirror")
+}
+
+// CloneBare clones a remote git repository to a local directory
+func (r *repository) CloneBare(location string, url string) error {
+	return r.CloneWithOptions(location, url, "--bare")
+}
+
+// ListRemote lists references in a remote repository
+// ListRemote will time out with a default timeout of 30s. If a different timeout is
+// required, TimedListRemote should be used instead
+func (r *repository) ListRemote(url string, args ...string) (string, string, error) {
+	return r.TimedListRemote(defaultCommandTimeout, url, args...)
+}
+
+// TimedListRemote lists references in a remote repository, or fails if the list does
+// not complete before the given timeout
+func (r *repository) TimedListRemote(timeout time.Duration, url string, args ...string) (string, string, error) {
+	gitArgs := []string{"ls-remote"}
+	gitArgs = append(gitArgs, args...)
+	gitArgs = append(gitArgs, url)
+	// `git ls-remote` does not allow for any timeout to be set, and defaults to a timeout
+	// of five minutes, so we enforce a timeout here to allow it to fail earlier than that
+	return r.timedGit(timeout, "", gitArgs...)
+}
+
+// Fetch updates the provided git repository
+func (r *repository) Fetch(location, uri, ref string) error {
+	_, _, err := r.git(location, "fetch", uri, ref)
+	return err
+}
+
+// Archive creates an archive of the Git repo at directory location at commit ref and with the given Git format,
+// and then writes that to the provided io.Writer
+func (r *repository) Archive(location, ref, format string, w io.Writer) error {
+	stdout, _, err := r.git(location, "archive", fmt.Sprintf("--format=%s", format), ref)
+	w.Write([]byte(stdout))
+	return err
+}
+
+// Checkout switches to the given ref for the git repository
+func (r *repository) Checkout(location string, ref string) error {
+	if r.shallow {
+		return errors.New("cannot checkout ref on shallow clone")
+	}
+	_, _, err := r.git(location, "checkout", ref, "--")
+	return err
+}
+
+// SubmoduleUpdate updates submodules, optionally recursively
+func (r *repository) SubmoduleUpdate(location string, init, recursive bool) error {
+	updateArgs := []string{"submodule", "update"}
+	if init {
+		updateArgs = append(updateArgs, "--init")
+	}
+	if recursive {
+		updateArgs = append(updateArgs, "--recursive")
+	}
+
+	_, _, err := r.git(location, updateArgs...)
+	return err
+}
+
+// ShowFormat formats the ref with the given git show format string
+func (r *repository) ShowFormat(location, ref, format string) (string, error) {
+	out, _, err := r.git(location, "show", "-s", ref, fmt.Sprintf("--format=%s", format))
+	return out, err
+}
+
+// Init initializes a new git repository in the provided location
+func (r *repository) Init(location string, bare bool) error {
+	args := []string{"init"}
+	if bare {
+		args = append(args, "--bare")
+	}
+	args = append(args, location)
+	_, _, err := r.git("", args...)
+	return err
+}
+
+func (r *repository) Add(location, spec string) error {
+	_, _, err := r.git(location, "add", spec)
+	return err
+}
+
+func (r *repository) Commit(location, message string) error {
+	_, _, err := r.git(location, "commit", "-m", message)
+	return err
+}
+
+// GetInfo retrieves the information about the source code and commit
+func (r *repository) GetInfo(location string) (*SourceInfo, []error) {
+	errors := []error{}
+	git := func(arg ...string) string {
+		stdout, stderr, err := r.git(location, arg...)
+		if err != nil {
+			errors = append(errors, fmt.Errorf("error invoking 'git %s': %v. Out: %s, Err: %s",
+				strings.Join(arg, " "), err, stdout, stderr))
+		}
+		return strings.TrimSpace(stdout)
+	}
+	info := &SourceInfo{}
+	info.Ref = git("rev-parse", "--abbrev-ref", "HEAD")
+	info.CommitID = git("rev-parse", "--verify", "HEAD")
+	info.AuthorName = git("--no-pager", "show", "-s", "--format=%an", "HEAD")
+	info.AuthorEmail = git("--no-pager", "show", "-s", "--format=%ae", "HEAD")
+	info.CommitterName = git("--no-pager", "show", "-s", "--format=%cn", "HEAD")
+	info.CommitterEmail = git("--no-pager", "show", "-s", "--format=%ce", "HEAD")
+	info.Date = git("--no-pager", "show", "-s", "--format=%ad", "HEAD")
+	info.Message = git("--no-pager", "show", "-s", "--format=%<(80,trunc)%s", "HEAD")
+
+	// it is not required for a Git repository to have a remote "origin" defined
+	if out, _, err := r.git(location, "config", "--get", "remote.origin.url"); err == nil {
+		info.Location = out
+	}
+
+	return info, errors
+}
+
+// command executes an external command in the given directory.
+// The command's standard out and error are trimmed and returned as strings.
+// It may return the type *GitError if the command itself fails.
+func command(name, dir string, env []string, args ...string) (stdout, stderr string, err error) {
+	return timedCommand(noCommandTimeout, name, dir, env, args...)
+}
+
+// timedCommand executes an external command in the given directory with a timeout.
+// The command's standard out and error are returned as strings.
+// It may return the type *GitError if the command itself fails or the type *TimeoutError
+// if the command times out before finishing.
+// If the git binary cannot be found, ErrGitNotAvailable will be returned as the error.
+func timedCommand(timeout time.Duration, name, dir string, env []string, args ...string) (stdout, stderr string, err error) {
+	var stdoutBuffer, stderrBuffer bytes.Buffer
+
+	klog.V(4).Infof("Executing %s %s", name, strings.Join(args, " "))
+
+	cmd := exec.Command(name, args...)
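+	// Wire the command to in-memory buffers rather than the parent process's
+	// stdio so its output can be captured, trimmed, and returned below.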
+	cmd.Dir = dir
+	cmd.Env = env
+	cmd.Stdout = &stdoutBuffer
+	cmd.Stderr = &stderrBuffer
+
+	if env != nil {
+		klog.V(8).Infof("Environment:\n")
+		for _, e := range env {
+			klog.V(8).Infof("- %s", e)
+		}
+	}
+
+	err, timedOut := runCommand(cmd, timeout)
+	if timedOut {
+		return "", "", &TimeoutError{
+			Err: fmt.Errorf("execution of %s %s timed out after %s", name, strings.Join(args, " "), timeout),
+		}
+	}
+
+	// we don't want captured output to have a trailing newline for formatting reasons
+	stdout, stderr = strings.TrimRight(stdoutBuffer.String(), "\n"), strings.TrimRight(stderrBuffer.String(), "\n")
+
+	// check whether git was available in the first place
+	if err != nil {
+		if !isBinaryInstalled(name) {
+			return "", "", ErrGitNotAvailable
+		}
+	}
+
+	// if we encounter an error we recognize, return a typed error
+	if exitErr, ok := err.(*exec.ExitError); ok {
+		return stdout, stderr, &GitError{
+			Err:    exitErr,
+			Stdout: stdout,
+			Stderr: stderr,
+		}
+	}
+
+	// if we didn't encounter an ExitError or a timeout, simply return the error
+	return stdout, stderr, err
+}
+
+// runCommand runs the command with the given timeout, and returns any errors encountered and whether
+// the command timed out or not
+func runCommand(cmd *exec.Cmd, timeout time.Duration) (error, bool) {
+	out := make(chan error)
+	go func() {
+		if err := cmd.Start(); err != nil {
+			klog.V(4).Infof("Error starting execution: %v", err)
+		}
+		out <- cmd.Wait()
+	}()
+
+	if timeout == noCommandTimeout {
+		select {
+		case err := <-out:
+			if err != nil {
+				klog.V(4).Infof("Error executing command: %v", err)
+			}
+			return err, false
+		}
+	} else {
+		select {
+		case err := <-out:
+			if err != nil {
+				klog.V(4).Infof("Error executing command: %v", err)
+			}
+			return err, false
+		case <-time.After(timeout):
+			klog.V(4).Infof("Command execution timed out after %s", timeout)
+			return nil, true
+		}
+	}
+}
+
+// TimeoutError is returned when the underlying Git command times out before finishing
+type TimeoutError struct {
+	Err error
+}
+
+func (e *TimeoutError) Error() string {
+	return e.Err.Error()
+}
+
+// GitError is returned when the underlying Git command returns a non-zero exit code.
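+// Callers can test the process exit status with IsExitCode, for example
+// (exit code illustrative; git uses different codes per failure):
+//
+//	if IsExitCode(err, 128) {
+//		// fatal git error, e.g. the ref or repository was missing
+//	}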
+type GitError struct { + Err error + Stdout string + Stderr string +} + +func (e *GitError) Error() string { + if len(e.Stderr) > 0 { + return e.Stderr + } + return e.Err.Error() +} + +func IsExitCode(err error, exitCode int) bool { + switch t := err.(type) { + case *GitError: + return IsExitCode(t.Err, exitCode) + case *exec.ExitError: + if ws, ok := t.Sys().(syscall.WaitStatus); ok { + return ws.ExitStatus() == exitCode + } + return false + } + return false +} + +func gitBinary() string { + if runtime.GOOS == "windows" { + return "git.exe" + } + return "git" +} + +func IsGitInstalled() bool { + return isBinaryInstalled(gitBinary()) +} + +func isBinaryInstalled(name string) bool { + _, err := exec.LookPath(name) + return err == nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/git/repository_test.go b/vendor/github.com/openshift/library-go/pkg/git/repository_test.go new file mode 100644 index 00000000000..6c083b43506 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/git/repository_test.go @@ -0,0 +1,149 @@ +package git + +import ( + "testing" + "time" +) + +func TestGetRootDir(t *testing.T) { + curDir := "/tests/dir" + tests := []struct { + stdout string + err bool + expected string + }{ + {"test/result/dir/.git", false, "test/result/dir"}, // The .git directory should be removed + {".git", false, curDir}, // When only .git is returned, it is the current dir + {"", true, ""}, // When blank is returned, this is not a git repository + } + for _, test := range tests { + r := &repository{git: makeExecFunc(test.stdout, nil)} + result, err := r.GetRootDir(curDir) + if !test.err && err != nil { + t.Errorf("Unexpected error: %v", err) + } + if test.err && err == nil { + t.Errorf("Expected error, but got no error.") + } + if !test.err && result != test.expected { + t.Errorf("Unexpected result: %s. Expected: %s", result, test.expected) + } + } +} + +func TestGetOriginURL(t *testing.T) { + url := "remote.origin.url https://test.com/a/repository/url" + r := &repository{git: makeExecFunc(url, nil)} + result, ok, err := r.GetOriginURL("/test/dir") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !ok { + t.Error("Unexpected not ok") + } + if result != "https://test.com/a/repository/url" { + t.Errorf("Unexpected result: %s. Expected: %s", result, url) + } +} + +func TestGetAlterativeOriginURL(t *testing.T) { + url := "remote.foo.url https://test.com/a/repository/url\nremote.upstream.url https://test.com/b/repository/url" + r := &repository{git: makeExecFunc(url, nil)} + result, ok, err := r.GetOriginURL("/test/dir") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if !ok { + t.Error("Unexpected not ok") + } + if result != "https://test.com/b/repository/url" { + t.Errorf("Unexpected result: %s. Expected: %s", result, url) + } +} + +func TestGetMissingOriginURL(t *testing.T) { + url := "remote.foo.url https://test.com/a/repository/url\nremote.bar.url https://test.com/b/repository/url" + r := &repository{git: makeExecFunc(url, nil)} + result, ok, err := r.GetOriginURL("/test/dir") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if ok { + t.Error("Unexpected ok") + } + if result != "" { + t.Errorf("Unexpected result: %s. Expected: %s", result, "") + } +} + +func TestGetRef(t *testing.T) { + ref := "branch1" + r := &repository{git: makeExecFunc(ref, nil)} + result := r.GetRef("/test/dir") + if result != ref { + t.Errorf("Unexpected result: %s. 
Expected: %s", result, ref) + } +} + +func TestClone(t *testing.T) { + r := &repository{git: makeExecFunc("", nil)} + err := r.Clone("/test/dir", "https://test/url/to/repository") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestFetch(t *testing.T) { + r := &repository{git: makeExecFunc("", nil)} + err := r.Fetch("/test/dir", "https://test/url/to/repository", "refs/pull/1/head") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestCheckout(t *testing.T) { + r := &repository{git: makeExecFunc("", nil)} + err := r.Checkout("/test/dir", "branch2") + if err != nil { + t.Errorf("Unexpected error: %v", err) + } +} + +func makeExecFunc(output string, err error) execGitFunc { + return func(dir string, args ...string) (out string, errout string, resultErr error) { + out = output + resultErr = err + return + } +} + +// TestTimedCommandTimeout tests that the `oc new-app` machinery that invokes `git ls-remote` +// on uncooperative servers correctly times out +func TestTimedCommandTimeout(t *testing.T) { + timeout := 1 * time.Millisecond + + outputChannel := make(chan timedCommandOutput) + go func() { + stdout, stderr, err := timedCommand(timeout, "yes", "/usr/bin", nil, []string{}...) + outputChannel <- timedCommandOutput{ + stdout: stdout, + stderr: stderr, + err: err, + } + }() + + select { + case output := <-outputChannel: + if _, ok := output.err.(*TimeoutError); !ok { + t.Fatalf("expected command to fail due to timeout, got: %v", output.err) + } + case <-time.After(1000 * timeout): + t.Fatalf("expected command to have timed out, but it didn't") + } +} + +type timedCommandOutput struct { + stdout string + stderr string + err error +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go new file mode 100644 index 00000000000..7133063d8a1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go @@ -0,0 +1,999 @@ +package dockerv1client + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "path" + "strings" + "time" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + + godockerclient "github.com/fsouza/go-dockerclient" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + knet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/transport" + "k8s.io/klog" + + "github.com/openshift/api/image/docker10" + "github.com/openshift/library-go/pkg/image/reference" +) + +var ( + ImageScheme = runtime.NewScheme() +) + +func init() { + utilruntime.Must(ImageScheme.AddConversionFuncs( + // Convert godockerclient client object to internal object + func(in *godockerclient.Image, out *docker10.DockerImage, s conversion.Scope) error { + if err := s.Convert(&in.Config, &out.Config, conversion.AllowDifferentFieldTypeNames); err != nil { + return err + } + if err := s.Convert(&in.ContainerConfig, &out.ContainerConfig, conversion.AllowDifferentFieldTypeNames); err != nil { + return err + } + out.ID = in.ID + out.Parent = in.Parent + out.Comment = in.Comment + out.Created = metav1.NewTime(in.Created) + out.Container = in.Container + out.DockerVersion = in.DockerVersion + out.Author = in.Author + out.Architecture = in.Architecture + out.Size = in.Size + return nil + }, + 
func(in *docker10.DockerImage, out *godockerclient.Image, s conversion.Scope) error {
+			if err := s.Convert(&in.Config, &out.Config, conversion.AllowDifferentFieldTypeNames); err != nil {
+				return err
+			}
+			if err := s.Convert(&in.ContainerConfig, &out.ContainerConfig, conversion.AllowDifferentFieldTypeNames); err != nil {
+				return err
+			}
+			out.ID = in.ID
+			out.Parent = in.Parent
+			out.Comment = in.Comment
+			out.Created = in.Created.Time
+			out.Container = in.Container
+			out.DockerVersion = in.DockerVersion
+			out.Author = in.Author
+			out.Architecture = in.Architecture
+			out.Size = in.Size
+			return nil
+		},
+	))
+}
+
+type Image struct {
+	Image godockerclient.Image
+
+	// Does this registry support pull by ID
+	PullByID bool
+}
+
+// Client includes methods for accessing a Docker registry by name.
+type Client interface {
+	// Connect to a Docker registry by name. Pass "" for the Docker Hub
+	Connect(registry string, allowInsecure bool) (Connection, error)
+}
+
+// Connection allows you to retrieve data from a Docker V1/V2 registry.
+type Connection interface {
+	// ImageTags will return a map of the tags for the image by namespace and name.
+	// If namespace is not specified, will default to "library" for Docker hub.
+	ImageTags(namespace, name string) (map[string]string, error)
+	// ImageByID will return the requested image by namespace, name, and ID.
+	// If namespace is not specified, will default to "library" for Docker hub.
+	ImageByID(namespace, name, id string) (*Image, error)
+	// ImageByTag will return the requested image by namespace, name, and tag
+	// (if not specified, "latest").
+	// If namespace is not specified, will default to "library" for Docker hub.
+	ImageByTag(namespace, name, tag string) (*Image, error)
+	// ImageManifest will return the raw image manifest and digest by namespace,
+	// name, and tag.
+	ImageManifest(namespace, name, tag string) (string, []byte, error)
+}
+
+// client implements the Client interface
+type client struct {
+	dialTimeout time.Duration
+	connections map[string]*connection
+	allowV2     bool
+}
+
+// NewClient returns a client object which allows public access to
+// a Docker registry. allowV2 controls whether the client may use the
+// V2 registry API; when false, connections are restricted to the V1 API.
+// TODO: accept a godockerclient auth config
+func NewClient(dialTimeout time.Duration, allowV2 bool) Client {
+	return &client{
+		dialTimeout: dialTimeout,
+		connections: make(map[string]*connection),
+		allowV2:     allowV2,
+	}
+}
+
+// Connect accepts the name of a registry in the common form Docker provides and will
+// create a connection to the registry. Callers may provide a host, a host:port, or
+// a fully qualified URL. When not providing a URL, the default scheme will be "https"
+func (c *client) Connect(name string, allowInsecure bool) (Connection, error) {
+	target, err := normalizeRegistryName(name)
+	if err != nil {
+		return nil, err
+	}
+	prefix := target.String()
+	if conn, ok := c.connections[prefix]; ok && conn.allowInsecure == allowInsecure {
+		return conn, nil
+	}
+	conn := newConnection(*target, c.dialTimeout, allowInsecure, c.allowV2)
+	c.connections[prefix] = conn
+	return conn, nil
+}
+
+// normalizeDockerHubHost returns the canonical DockerHub registry host for a given host
+// segment and Docker API version.
+func normalizeDockerHubHost(host string, v2 bool) string {
+	switch host {
+	case reference.DockerDefaultRegistry, "www."
+ reference.DockerDefaultRegistry, reference.DockerDefaultV1Registry, reference.DockerDefaultV2Registry: + if v2 { + return reference.DockerDefaultV2Registry + } + return reference.DockerDefaultV1Registry + } + return host +} + +// normalizeRegistryName standardizes the registry URL so that it is consistent +// across different versions of the same name (for reuse of auth). +func normalizeRegistryName(name string) (*url.URL, error) { + prefix := name + if len(prefix) == 0 { + prefix = reference.DockerDefaultV1Registry + } + hadPrefix := false + switch { + case strings.HasPrefix(prefix, "http://"), strings.HasPrefix(prefix, "https://"): + hadPrefix = true + default: + prefix = "https://" + prefix + } + + target, err := url.Parse(prefix) + if err != nil { + return nil, fmt.Errorf("the registry name cannot be made into a valid url: %v", err) + } + + if host, port, err := net.SplitHostPort(target.Host); err == nil { + host = normalizeDockerHubHost(host, false) + if hadPrefix { + switch { + case port == "443" && target.Scheme == "https": + target.Host = host + case port == "80" && target.Scheme == "http": + target.Host = host + } + } + } else { + target.Host = normalizeDockerHubHost(target.Host, false) + } + return target, nil +} + +// convertConnectionError turns a registry error into a typed error if appropriate. +func convertConnectionError(registry string, err error) error { + switch { + case strings.Contains(err.Error(), "connection refused"): + return errRegistryNotFound{registry} + default: + return err + } +} + +// connection represents a connection to a particular DockerHub registry, reusing +// tokens and other settings. connections are not thread safe. +type connection struct { + client *http.Client + url url.URL + cached map[string]repository + isV2 *bool + token string + + allowInsecure bool +} + +// newConnection creates a new connection +func newConnection(url url.URL, dialTimeout time.Duration, allowInsecure, enableV2 bool) *connection { + var isV2 *bool + if !enableV2 { + v2 := false + isV2 = &v2 + } + + var rt http.RoundTripper + if allowInsecure { + rt = knet.SetTransportDefaults(&http.Transport{ + Dial: (&net.Dialer{ + Timeout: dialTimeout, + KeepAlive: 30 * time.Second, + }).Dial, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }) + } else { + rt = knet.SetTransportDefaults(&http.Transport{ + Dial: (&net.Dialer{ + Timeout: dialTimeout, + KeepAlive: 30 * time.Second, + }).Dial, + }) + } + + rt = transport.DebugWrappers(rt) + + jar, _ := cookiejar.New(nil) + client := &http.Client{Jar: jar, Transport: rt} + return &connection{ + url: url, + client: client, + cached: make(map[string]repository), + isV2: isV2, + + allowInsecure: allowInsecure, + } +} + +// ImageTags returns the tags for the named Docker image repository. 
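+// For example (illustrative), against Docker Hub, ImageTags("", "busybox")
+// resolves the same tags as ImageTags("library", "busybox").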
+func (c *connection) ImageTags(namespace, name string) (map[string]string, error) { + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + if len(name) == 0 { + return nil, fmt.Errorf("image name must be specified") + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + return repo.getTags(c) +} + +// ImageByID returns the specified image within the named Docker image repository +func (c *connection) ImageByID(namespace, name, imageID string) (*Image, error) { + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + if len(name) == 0 { + return nil, fmt.Errorf("image name must be specified") + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + image, _, err := repo.getImage(c, imageID, "") + return image, err +} + +// ImageByTag returns the specified image within the named Docker image repository +func (c *connection) ImageByTag(namespace, name, tag string) (*Image, error) { + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + if len(name) == 0 { + return nil, fmt.Errorf("image name must be specified") + } + searchTag := tag + if len(searchTag) == 0 { + searchTag = "latest" + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + image, _, err := repo.getTaggedImage(c, searchTag, tag) + return image, err +} + +// ImageManifest returns raw manifest of the specified image within the named Docker image repository +func (c *connection) ImageManifest(namespace, name, tag string) (string, []byte, error) { + if len(name) == 0 { + return "", nil, fmt.Errorf("image name must be specified") + } + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + searchTag := tag + if len(searchTag) == 0 { + searchTag = "latest" + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return "", nil, err + } + + image, manifest, err := repo.getTaggedImage(c, searchTag, tag) + if err != nil { + return "", nil, err + } + return image.Image.ID, manifest, err +} + +// getCachedRepository returns a repository interface matching the provided name and +// may cache information about the server on the connection object. 
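+// The first lookup probes the server once via checkV2 to pick the V1 or V2
+// implementation; the result is memoized on c.isV2 for later lookups.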
+func (c *connection) getCachedRepository(name string) (repository, error) {
+	if cached, ok := c.cached[name]; ok {
+		return cached, nil
+	}
+
+	if c.isV2 == nil {
+		v2, err := c.checkV2()
+		if err != nil {
+			return nil, err
+		}
+		c.isV2 = &v2
+	}
+	if *c.isV2 {
+		base := c.url
+		base.Host = normalizeDockerHubHost(base.Host, true)
+		repo := &v2repository{
+			name:     name,
+			endpoint: base,
+			token:    c.token,
+		}
+		c.cached[name] = repo
+		return repo, nil
+	}
+
+	repo, err := c.getRepositoryV1(name)
+	if err != nil {
+		return nil, err
+	}
+	c.cached[name] = repo
+	return repo, nil
+}
+
+// checkV2 performs the registry version checking steps as described by
+// https://docs.docker.com/registry/spec/api/
+func (c *connection) checkV2() (bool, error) {
+	base := c.url
+	base.Host = normalizeDockerHubHost(base.Host, true)
+	base.Path = path.Join(base.Path, "v2") + "/"
+	req, err := http.NewRequest("GET", base.String(), nil)
+	if err != nil {
+		return false, fmt.Errorf("error creating request: %v", err)
+	}
+	resp, err := c.client.Do(req)
+	if err != nil {
+		// if we tried https and were rejected, try http
+		if c.url.Scheme == "https" && c.allowInsecure {
+			klog.V(4).Infof("Failed to get https, trying http: %v", err)
+			c.url.Scheme = "http"
+			return c.checkV2()
+		}
+		return false, convertConnectionError(c.url.String(), fmt.Errorf("error checking for V2 registry at %s: %v", base.String(), err))
+	}
+	defer resp.Body.Close()
+
+	switch code := resp.StatusCode; {
+	case code == http.StatusUnauthorized:
+		// handle auth challenges on individual repositories
+	case code >= 300 || resp.StatusCode < 200:
+		return false, nil
+	}
+	if len(resp.Header.Get("Docker-Distribution-API-Version")) == 0 {
+		klog.V(5).Infof("Registry v2 API at %s did not have a Docker-Distribution-API-Version header", base.String())
+		return false, nil
+	}
+
+	klog.V(5).Infof("Found registry v2 API at %s", base.String())
+	return true, nil
+}
+
+// parseAuthChallenge splits a header of the form 'type[ <key>="<value>"[,...]]' returned
+// by the registry
+func parseAuthChallenge(header string) (string, map[string]string) {
+	sections := strings.SplitN(header, " ", 2)
+	if len(sections) == 1 {
+		sections = append(sections, "")
+	}
+	challenge := sections[1]
+	keys := make(map[string]string)
+	for _, s := range strings.Split(challenge, ",") {
+		pair := strings.SplitN(strings.TrimSpace(s), "=", 2)
+		if len(pair) == 1 {
+			keys[pair[0]] = ""
+			continue
+		}
+		keys[pair[0]] = strings.Trim(pair[1], "\"")
+	}
+	return sections[0], keys
+}
+
+// authenticateV2 attempts to respond to a given WWW-Authenticate challenge header
+// by asking for a token from the realm. Currently only supports "Bearer" challenges
+// with no credentials provided.
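+// A typical challenge looks like (hostnames illustrative):
+//
+//	WWW-Authenticate: Bearer realm="https://auth.example.com/token",service="registry.example.com"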
+// TODO: support credentials or replace with the Docker distribution v2 registry client +func (c *connection) authenticateV2(header string) (string, error) { + mode, keys := parseAuthChallenge(header) + if strings.ToLower(mode) != "bearer" { + return "", fmt.Errorf("unsupported authentication challenge from registry: %s", header) + } + + realm, ok := keys["realm"] + if !ok { + return "", fmt.Errorf("no realm specified by the server, cannot authenticate: %s", header) + } + delete(keys, "realm") + + realmURL, err := url.Parse(realm) + if err != nil { + return "", fmt.Errorf("realm %q was not a valid url: %v", realm, err) + } + query := realmURL.Query() + for k, v := range keys { + query.Set(k, v) + } + realmURL.RawQuery = query.Encode() + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return "", fmt.Errorf("error creating v2 auth request: %v", err) + } + + resp, err := c.client.Do(req) + if err != nil { + return "", convertConnectionError(realmURL.String(), fmt.Errorf("error authorizing to the registry: %v", err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + return "", fmt.Errorf("permission denied to access realm %q", realmURL.String()) + case code == http.StatusNotFound: + return "", fmt.Errorf("defined realm %q cannot be found", realm) + case code >= 300 || resp.StatusCode < 200: + return "", fmt.Errorf("error authenticating to the realm %q; server returned %d", realmURL.String(), resp.StatusCode) + } + + token := struct { + Token string `json:"token"` + }{} + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("can't read authorization body from %s: %v", realmURL.String(), err) + } + if err := json.Unmarshal(body, &token); err != nil { + return "", fmt.Errorf("can't decode the server authorization from %s: %v", realmURL.String(), err) + } + return token.Token, nil +} + +// getRepositoryV1 returns a repository implementation for a v1 registry by asking for +// the appropriate endpoint token. It will try HTTP if HTTPS fails and insecure connections +// are allowed. 
+func (c *connection) getRepositoryV1(name string) (repository, error) { + klog.V(4).Infof("Getting repository %s from %s", name, c.url.String()) + + base := c.url + base.Path = path.Join(base.Path, fmt.Sprintf("/v1/repositories/%s/images", name)) + req, err := http.NewRequest("GET", base.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Add("X-Docker-Token", "true") + resp, err := c.client.Do(req) + if err != nil { + // if we tried https and were rejected, try http + if c.url.Scheme == "https" && c.allowInsecure { + klog.V(4).Infof("Failed to get https, trying http: %v", err) + c.url.Scheme = "http" + return c.getRepositoryV1(name) + } + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting X-Docker-Token from %s: %v", name, err)) + } + defer resp.Body.Close() + + // if we were redirected, update the base urls + c.url.Scheme = resp.Request.URL.Scheme + c.url.Host = resp.Request.URL.Host + + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + return nil, errRepositoryNotFound{name} + case code >= 300 || resp.StatusCode < 200: + return nil, fmt.Errorf("error retrieving repository: server returned %d", resp.StatusCode) + } + + // TODO: select a random endpoint + return &v1repository{ + name: name, + endpoint: url.URL{Scheme: c.url.Scheme, Host: resp.Header.Get("X-Docker-Endpoints")}, + token: resp.Header.Get("X-Docker-Token"), + }, nil +} + +// repository is an interface for retrieving image info from a Docker V1 or V2 repository. +type repository interface { + getTags(c *connection) (map[string]string, error) + getTaggedImage(c *connection, tag, userTag string) (*Image, []byte, error) + getImage(c *connection, image, userTag string) (*Image, []byte, error) +} + +// v2repository exposes methods for accessing a named Docker V2 repository on a server. +type v2repository struct { + name string + endpoint url.URL + token string + retries int +} + +// v2tags describes the tags/list returned by the Docker V2 registry. +type v2tags struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +func (repo *v2repository) getTags(c *connection) (map[string]string, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v2/%s/tags/list", repo.name)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + addAcceptHeader(req) + + if len(repo.token) > 0 { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", repo.token)) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image tags for %s: %v", repo.name, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + if len(repo.token) != 0 { + // The DockerHub returns JWT tokens that take effect at "now" at second resolution, which means clients can + // be rejected when requests are made near the time boundary. 
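+			// A bounded retry (repo.retries is reset to 2 after each successful
+			// token fetch below) with a half-second sleep papers over that clock
+			// skew before giving up and treating the repository as not found.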
+ if repo.retries > 0 { + repo.retries-- + time.Sleep(time.Second / 2) + return repo.getTags(c) + } + delete(c.cached, repo.name) + // godockerclient will not return a NotFound on any repository URL - for backwards compatibility, return NotFound on the + // repo + return nil, errRepositoryNotFound{repo.name} + } + token, err := c.authenticateV2(resp.Header.Get("WWW-Authenticate")) + if err != nil { + return nil, fmt.Errorf("error getting image tags for %s: %v", repo.name, err) + } + repo.retries = 2 + repo.token = token + return repo.getTags(c) + + case code == http.StatusNotFound: + return nil, errRepositoryNotFound{repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + return nil, fmt.Errorf("error retrieving tags: server returned %d", resp.StatusCode) + } + tags := &v2tags{} + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, fmt.Errorf("error decoding image %s tags: %v", repo.name, err) + } + legacyTags := make(map[string]string) + for _, tag := range tags.Tags { + legacyTags[tag] = tag + } + return legacyTags, nil +} + +func (repo *v2repository) getTaggedImage(c *connection, tag, userTag string) (*Image, []byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v2/%s/manifests/%s", repo.name, tag)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + addAcceptHeader(req) + + if len(repo.token) > 0 { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", repo.token)) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image for %s:%s: %v", repo.name, tag, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + if len(repo.token) != 0 { + // The DockerHub returns JWT tokens that take effect at "now" at second resolution, which means clients can + // be rejected when requests are made near the time boundary. 
+ if repo.retries > 0 { + repo.retries-- + time.Sleep(time.Second / 2) + return repo.getTaggedImage(c, tag, userTag) + } + delete(c.cached, repo.name) + // godockerclient will not return a NotFound on any repository URL - for backwards compatibility, return NotFound on the + // repo + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("passed valid auth token, but unable to find tagged image at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, nil, errTagNotFound{len(userTag) == 0, tag, repo.name} + } + token, err := c.authenticateV2(resp.Header.Get("WWW-Authenticate")) + if err != nil { + return nil, nil, fmt.Errorf("error getting image for %s:%s: %v", repo.name, tag, err) + } + repo.retries = 2 + repo.token = token + return repo.getTaggedImage(c, tag, userTag) + case code == http.StatusNotFound: + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("unable to find tagged image at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, nil, errTagNotFound{len(userTag) == 0, tag, repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, nil, fmt.Errorf("error retrieving tagged image: server returned %d", resp.StatusCode) + } + + digest := resp.Header.Get("Docker-Content-Digest") + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, nil, fmt.Errorf("can't read image body from %s: %v", req.URL, err) + } + dockerImage, err := repo.unmarshalImageManifest(c, body) + if err != nil { + return nil, nil, err + } + image := &Image{ + Image: *dockerImage, + } + if len(digest) > 0 { + image.Image.ID = digest + image.PullByID = true + } + return image, body, nil +} + +func (repo *v2repository) getImage(c *connection, image, userTag string) (*Image, []byte, error) { + return repo.getTaggedImage(c, image, userTag) +} + +func (repo *v2repository) getImageConfig(c *connection, dgst string) ([]byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v2/%s/blobs/%s", repo.name, dgst)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + + if len(repo.token) > 0 { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", repo.token)) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image config for %s: %v", repo.name, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + if len(repo.token) != 0 { + // The DockerHub returns JWT tokens that take effect at "now" at second resolution, which means clients can + // be rejected when requests are made near the time boundary. 
+ if repo.retries > 0 { + repo.retries-- + time.Sleep(time.Second / 2) + return repo.getImageConfig(c, dgst) + } + delete(c.cached, repo.name) + // godockerclient will not return a NotFound on any repository URL - for backwards compatibility, return NotFound on the + // repo + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("passed valid auth token, but unable to find image config at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, errBlobNotFound{dgst, repo.name} + } + token, err := c.authenticateV2(resp.Header.Get("WWW-Authenticate")) + if err != nil { + return nil, fmt.Errorf("error getting image config for %s:%s: %v", repo.name, dgst, err) + } + repo.retries = 2 + repo.token = token + return repo.getImageConfig(c, dgst) + case code == http.StatusNotFound: + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("unable to find image config at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, errBlobNotFound{dgst, repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, fmt.Errorf("error retrieving image config: server returned %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("can't read image body from %s: %v", req.URL, err) + } + + return body, nil +} + +func (repo *v2repository) unmarshalImageManifest(c *connection, body []byte) (*godockerclient.Image, error) { + manifest := DockerImageManifest{} + if err := json.Unmarshal(body, &manifest); err != nil { + return nil, err + } + switch manifest.SchemaVersion { + case 1: + if len(manifest.History) == 0 { + return nil, fmt.Errorf("image has no v1Compatibility history and cannot be used") + } + return unmarshalDockerImage([]byte(manifest.History[0].DockerV1Compatibility)) + case 2: + config, err := repo.getImageConfig(c, manifest.Config.Digest) + if err != nil { + return nil, err + } + return unmarshalDockerImage(config) + } + return nil, fmt.Errorf("unrecognized Docker image manifest schema %d", manifest.SchemaVersion) +} + +// v1repository exposes methods for accessing a named Docker V1 repository on a server. 
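+// Unlike v2repository, it authenticates with the X-Docker-Token captured during
+// repository lookup (see getRepositoryV1), sent as a "Token" Authorization header.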
+type v1repository struct { + name string + endpoint url.URL + token string +} + +func (repo *v1repository) getTags(c *connection) (map[string]string, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v1/repositories/%s/tags", repo.name)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Add("Authorization", "Token "+repo.token) + resp, err := c.client.Do(req) + if err != nil { + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image tags for %s: %v", repo.name, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + return nil, errRepositoryNotFound{repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, fmt.Errorf("error retrieving tags: server returned %d", resp.StatusCode) + } + tags := make(map[string]string) + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, fmt.Errorf("error decoding image %s tags: %v", repo.name, err) + } + return tags, nil +} + +func (repo *v1repository) getTaggedImage(c *connection, tag, userTag string) (*Image, []byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v1/repositories/%s/tags/%s", repo.name, tag)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Add("Authorization", "Token "+repo.token) + resp, err := c.client.Do(req) + if err != nil { + return nil, nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image id for %s:%s: %v", repo.name, tag, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + // Attempt to lookup tag in tags map, supporting registries that don't allow retrieval + // of tags to ids (Pulp/Crane) + allTags, err := repo.getTags(c) + if err != nil { + return nil, nil, err + } + if image, ok := allTags[tag]; ok { + return repo.getImage(c, image, "") + } + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("unable to find v1 tagged image at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, nil, errTagNotFound{len(userTag) == 0, tag, repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, nil, fmt.Errorf("error retrieving tag: server returned %d", resp.StatusCode) + } + var imageID string + if err := json.NewDecoder(resp.Body).Decode(&imageID); err != nil { + return nil, nil, fmt.Errorf("error decoding image id: %v", err) + } + return repo.getImage(c, imageID, "") +} + +func (repo *v1repository) getImage(c *connection, image, userTag string) (*Image, []byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v1/images/%s/json", image)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + if len(repo.token) > 0 { + req.Header.Add("Authorization", "Token "+repo.token) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting json for 
image %q: %v", image, err))
+	}
+	defer resp.Body.Close()
+	switch code := resp.StatusCode; {
+	case code == http.StatusNotFound:
+		return nil, nil, NewImageNotFoundError(repo.name, image, userTag)
+	case code >= 300 || resp.StatusCode < 200:
+		// token might have expired - evict repo from cache so we can get a new one on retry
+		delete(c.cached, repo.name)
+		if body, err := ioutil.ReadAll(resp.Body); err == nil {
+			klog.V(6).Infof("unable to fetch image %s: %#v\n%s", req.URL, resp, string(body))
+		}
+		return nil, nil, fmt.Errorf("error retrieving image %s: server returned %d", req.URL, resp.StatusCode)
+	}
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, nil, fmt.Errorf("can't read image body from %s: %v", req.URL, err)
+	}
+	dockerImage, err := unmarshalDockerImage(body)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &Image{Image: *dockerImage}, body, nil
+}
+
+// errBlobNotFound is an error indicating the requested blob does not exist in the repository.
+type errBlobNotFound struct {
+	digest     string
+	repository string
+}
+
+func (e errBlobNotFound) Error() string {
+	return fmt.Sprintf("blob %s was not found in repository %q", e.digest, e.repository)
+}
+
+// errTagNotFound is an error indicating the requested tag does not exist on the server. May be returned on
+// a v2 repository when the repository does not exist (because the v2 registry returns 401 on any repository
+// you do not have permission to see, or does not exist)
+type errTagNotFound struct {
+	wasDefault bool
+	tag        string
+	repository string
+}
+
+func (e errTagNotFound) Error() string {
+	if e.wasDefault {
+		return fmt.Sprintf("the default tag %q has not been set on repository %q", e.tag, e.repository)
+	}
+	return fmt.Sprintf("tag %q has not been set on repository %q", e.tag, e.repository)
+}
+
+// errRepositoryNotFound indicates the repository is not found - but is only guaranteed to be returned
+// for v1 Docker registries.
+type errRepositoryNotFound struct { + repository string +} + +func (e errRepositoryNotFound) Error() string { + return fmt.Sprintf("the repository %q was not found", e.repository) +} + +type errImageNotFound struct { + tag string + image string + repository string +} + +func NewImageNotFoundError(repository, image, tag string) error { + return errImageNotFound{tag, image, repository} +} + +func (e errImageNotFound) Error() string { + if len(e.tag) == 0 { + return fmt.Sprintf("the image %q in repository %q was not found and may have been deleted", e.image, e.repository) + } + return fmt.Sprintf("the image %q in repository %q with tag %q was not found and may have been deleted", e.image, e.repository, e.tag) +} + +type errRegistryNotFound struct { + registry string +} + +func (e errRegistryNotFound) Error() string { + return fmt.Sprintf("the registry %q could not be reached", e.registry) +} + +func IsRegistryNotFound(err error) bool { + _, ok := err.(errRegistryNotFound) + return ok +} + +func IsRepositoryNotFound(err error) bool { + _, ok := err.(errRepositoryNotFound) + return ok +} + +func IsImageNotFound(err error) bool { + _, ok := err.(errImageNotFound) + return ok +} + +func IsTagNotFound(err error) bool { + _, ok := err.(errTagNotFound) + return ok +} + +func IsBlobNotFound(err error) bool { + _, ok := err.(errBlobNotFound) + return ok +} + +func IsNotFound(err error) bool { + return IsRegistryNotFound(err) || IsRepositoryNotFound(err) || IsImageNotFound(err) || IsTagNotFound(err) || IsBlobNotFound(err) +} + +func unmarshalDockerImage(body []byte) (*godockerclient.Image, error) { + var imagePre012 godockerclient.ImagePre012 + if err := json.Unmarshal(body, &imagePre012); err != nil { + return nil, err + } + + return &godockerclient.Image{ + ID: imagePre012.ID, + Parent: imagePre012.Parent, + Comment: imagePre012.Comment, + Created: imagePre012.Created, + Container: imagePre012.Container, + ContainerConfig: imagePre012.ContainerConfig, + DockerVersion: imagePre012.DockerVersion, + Author: imagePre012.Author, + Config: imagePre012.Config, + Architecture: imagePre012.Architecture, + Size: imagePre012.Size, + }, nil +} + +func addAcceptHeader(r *http.Request) { + r.Header.Add("Accept", schema1.MediaTypeManifest) + r.Header.Add("Accept", schema2.MediaTypeManifest) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go new file mode 100644 index 00000000000..d0de6b263cf --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go @@ -0,0 +1,399 @@ +package dockerv1client + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" +) + +// tests of running registries are done in the integration client test + +func TestHTTPFallback(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/tags") { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + w.WriteHeader(http.StatusOK) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); !IsRepositoryNotFound(err) { + t.Error(err) + } + <-called + 
<-called +} + +func TestV2Check(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/v2/") { + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.WriteHeader(http.StatusOK) + return + } + if strings.HasSuffix(r.URL.Path, "/tags/list") { + w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, `{"tags":["tag1","image1"]}`) + return + } + t.Fatalf("unexpected request: %s %s", r.Method, r.URL.RequestURI()) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + tags, err := conn.ImageTags("foo", "bar") + if err != nil { + t.Fatal(err) + } + if tags["tag1"] != "tag1" { + t.Errorf("unexpected tags: %#v", tags) + } + if tags["image1"] != "image1" { + t.Errorf("unexpected tags: %#v", tags) + } + + <-called + <-called +} + +func TestV2CheckNoDistributionHeader(t *testing.T) { + called := make(chan struct{}, 3) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/v2/") { + w.Header().Set("Docker-Distribution-API-Version", "") + w.WriteHeader(http.StatusOK) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + + // Images + if strings.HasSuffix(r.URL.Path, "/images") { + return + } + + // ImageTags + if strings.HasSuffix(r.URL.Path, "/tags") { + fmt.Fprintln(w, `{"tag1":"image1"}`) + return + } + + // get tag->image id + if strings.HasSuffix(r.URL.Path, "latest") { + fmt.Fprintln(w, `"image1"`) + return + } + + // get image json + if strings.HasSuffix(r.URL.Path, "json") { + fmt.Fprintln(w, `{"id":"image1"}`) + return + } + t.Fatalf("unexpected request: %s %s", r.Method, r.URL.RequestURI()) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + tags, err := conn.ImageTags("foo", "bar") + if err != nil { + t.Fatal(err) + } + if tags["tag1"] != "image1" { + t.Errorf("unexpected tags: %#v", tags) + } + + <-called + <-called + <-called +} + +func TestInsecureHTTPS(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/tags") { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + w.WriteHeader(http.StatusOK) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); !IsRepositoryNotFound(err) { + t.Error(err) + } + <-called + <-called +} + +func TestProxy(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/tags") { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + w.WriteHeader(http.StatusOK) + })) + os.Setenv("HTTP_PROXY", "http.proxy.tld") + os.Setenv("HTTPS_PROXY", "secure.proxy.tld") + os.Setenv("NO_PROXY", "") + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, 
true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); !IsRepositoryNotFound(err) { + t.Error(err) + } + <-called + <-called +} + +func TestTokenExpiration(t *testing.T) { + var uri *url.URL + lastToken := "" + tokenIndex := 0 + validToken := "" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Docker-Token") == "true" { + tokenIndex++ + lastToken = fmt.Sprintf("token%d", tokenIndex) + validToken = lastToken + w.Header().Set("X-Docker-Token", lastToken) + w.Header().Set("X-Docker-Endpoints", uri.Host) + return + } + + auth := r.Header.Get("Authorization") + parts := strings.Split(auth, " ") + token := parts[1] + if token != validToken { + w.WriteHeader(http.StatusUnauthorized) + return + } + + w.WriteHeader(http.StatusOK) + + // ImageTags + if strings.HasSuffix(r.URL.Path, "/tags") { + fmt.Fprintln(w, `{"tag1":"image1"}`) + } + + // get tag->image id + if strings.HasSuffix(r.URL.Path, "latest") { + fmt.Fprintln(w, `"image1"`) + } + + // get image json + if strings.HasSuffix(r.URL.Path, "json") { + fmt.Fprintln(w, `{"id":"image1"}`) + } + })) + + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); err != nil { + t.Fatal(err) + } + + // expire token, should get an error + validToken = "" + if _, err := conn.ImageTags("foo", "bar"); err == nil { + t.Fatal("expected error") + } + // retry, should get a new token + if _, err := conn.ImageTags("foo", "bar"); err != nil { + t.Fatal(err) + } + + // expire token, should get an error + validToken = "" + if _, err := conn.ImageByTag("foo", "bar", "latest"); err == nil { + t.Fatal("expected error") + } + // retry, should get a new token + if _, err := conn.ImageByTag("foo", "bar", "latest"); err != nil { + t.Fatal(err) + } + + // expire token, should get an error + validToken = "" + if _, err := conn.ImageByID("foo", "bar", "image1"); err == nil { + t.Fatal("expected error") + } + // retry, should get a new token + if _, err := conn.ImageByID("foo", "bar", "image1"); err != nil { + t.Fatal(err) + } +} + +func TestGetTagFallback(t *testing.T) { + var uri *url.URL + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Docker-Endpoints", uri.Host) + + // get all tags + if strings.HasSuffix(r.URL.Path, "/tags") { + fmt.Fprintln(w, `{"tag1":"image1", "test":"image2"}`) + w.WriteHeader(http.StatusOK) + return + } + if strings.HasSuffix(r.URL.Path, "/json") { + fmt.Fprintln(w, `{"ID":"image2"}`) + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + c := conn.(*connection) + if err != nil { + t.Fatal(err) + } + repo := &v1repository{ + name: "testrepo", + endpoint: *uri, + } + // Case when tag is found + img, _, err := repo.getTaggedImage(c, "test", "") + if err != nil { + t.Errorf("unexpected error getting tag: %v", err) + return + } + if img.Image.ID != "image2" { + t.Errorf("unexpected image for tag: %v", img) + } + // Case when tag is not found + img, _, err = repo.getTaggedImage(c, "test2", "") + if err == nil { + t.Errorf("expected error") + } +} + +func TestImageManifest(t *testing.T) { + manifestDigest := 
"sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238" + + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + t.Logf("got %s %s", r.Method, r.URL.Path) + switch r.URL.Path { + case "/v2/": + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.Write([]byte(`{}`)) + case "/v2/test/image/manifests/latest", "/v2/test/image/manifests/" + manifestDigest: + if r.Method == "HEAD" { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(SampleImageManifestSchema1))) + w.Header().Set("Docker-Content-Digest", manifestDigest) + w.WriteHeader(http.StatusOK) + return + } + w.Write([]byte(SampleImageManifestSchema1)) + default: + t.Fatalf("unexpected request: %s %s", r.Method, r.URL.RequestURI()) + return + } + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + _, manifest, err := conn.ImageManifest("test", "image", "latest") + if err != nil { + t.Fatal(err) + } + if len(manifest) == 0 { + t.Errorf("empty manifest") + } + + if string(manifest) != SampleImageManifestSchema1 { + t.Errorf("unexpected manifest: %#v", manifest) + } + + <-called + <-called +} + +const SampleImageManifestSchema1 = `{ + "schemaVersion": 1, + "name": "nm/is", + "tag": "latest", + "architecture": "", + "fsLayers": [ + { + "blobSum": "sha256:b2c5513bd934a7efb412c0dd965600b8cb00575b585eaff1cb980b69037fe6cd" + }, + { + "blobSum": "sha256:2dde6f11a89463bf20dba3b47d8b3b6de7cdcc19e50634e95a18dd95c278768d" + } + ], + "history": [ + { + "v1Compatibility": "{\"size\":18407936}" + }, + { + "v1Compatibility": "{\"size\":19387392}" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "5HTY:A24B:L6PG:TQ3G:GMAK:QGKZ:ICD4:S7ZJ:P5JX:UTMP:XZLK:ZXVH", + "kty": "EC", + "x": "j5YnDSyrVIt3NquUKvcZIpbfeD8HLZ7BVBFL4WutRBM", + "y": "PBgFAZ3nNakYN3H9enhrdUrQ_HPYzb8oX5rtJxJo1Y8" + }, + "alg": "ES256" + }, + "signature": "1rXiEmWnf9eL7m7Wy3K4l25-Zv2XXl5GgqhM_yjT0ujPmTn0uwfHcCWlweHa9gput3sECj507eQyGpBOF5rD6Q", + "protected": "eyJmb3JtYXRMZW5ndGgiOjQ4NSwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE2LTA3LTI2VDExOjQ2OjQ2WiJ9" + } + ] +}` diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go new file mode 100644 index 00000000000..c5161d4fe9f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go @@ -0,0 +1,26 @@ +package dockerv1client + +import "github.com/openshift/api/image/docker10" + +// Convert_DockerV1CompatibilityImage_to_DockerImageConfig takes a Docker registry digest +// (schema 2.1) and converts it to the external API version of Image. 
+func Convert_DockerV1CompatibilityImage_to_DockerImageConfig(in *DockerV1CompatibilityImage, out *DockerImageConfig) error {
+	*out = DockerImageConfig{
+		ID:              in.ID,
+		Parent:          in.Parent,
+		Comment:         in.Comment,
+		Created:         in.Created,
+		Container:       in.Container,
+		DockerVersion:   in.DockerVersion,
+		Author:          in.Author,
+		Architecture:    in.Architecture,
+		Size:            in.Size,
+		OS:              "linux",
+		ContainerConfig: in.ContainerConfig,
+	}
+	if in.Config != nil {
+		out.Config = &docker10.DockerConfig{}
+		*out.Config = *in.Config
+	}
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go
new file mode 100644
index 00000000000..3b85b81e0be
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go
@@ -0,0 +1,113 @@
+package dockerv1client
+
+import (
+	"time"
+
+	"github.com/openshift/api/image/docker10"
+)
+
+// TODO: Move these to openshift/api
+
+// DockerImageManifest represents the Docker v2 image format.
+type DockerImageManifest struct {
+	SchemaVersion int    `json:"schemaVersion"`
+	MediaType     string `json:"mediaType,omitempty"`
+
+	// schema1
+	Name         string          `json:"name"`
+	Tag          string          `json:"tag"`
+	Architecture string          `json:"architecture"`
+	FSLayers     []DockerFSLayer `json:"fsLayers"`
+	History      []DockerHistory `json:"history"`
+
+	// schema2
+	Layers []Descriptor `json:"layers"`
+	Config Descriptor   `json:"config"`
+}
+
+// DockerFSLayer is a container struct for BlobSums defined in an image manifest
+type DockerFSLayer struct {
+	// DockerBlobSum is the tarsum of the referenced filesystem image layer
+	// TODO make this digest.Digest once docker/distribution is in Godeps
+	DockerBlobSum string `json:"blobSum"`
+}
+
+// DockerHistory stores unstructured v1 compatibility information
+type DockerHistory struct {
+	// DockerV1Compatibility is the raw v1 compatibility information
+	DockerV1Compatibility string `json:"v1Compatibility"`
+}
+
+// DockerV1CompatibilityImage represents the structured v1
+// compatibility information.
+type DockerV1CompatibilityImage struct {
+	ID              string                 `json:"id"`
+	Parent          string                 `json:"parent,omitempty"`
+	Comment         string                 `json:"comment,omitempty"`
+	Created         time.Time              `json:"created"`
+	Container       string                 `json:"container,omitempty"`
+	ContainerConfig docker10.DockerConfig  `json:"container_config,omitempty"`
+	DockerVersion   string                 `json:"docker_version,omitempty"`
+	Author          string                 `json:"author,omitempty"`
+	Config          *docker10.DockerConfig `json:"config,omitempty"`
+	Architecture    string                 `json:"architecture,omitempty"`
+	Size            int64                  `json:"size,omitempty"`
+}
+
+// DockerV1CompatibilityImageSize represents the structured v1
+// compatibility information for size
+type DockerV1CompatibilityImageSize struct {
+	Size int64 `json:"size,omitempty"`
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
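+	// Digests take the form "<algorithm>:<hex-encoded-value>", most commonly
+	// a sha256 digest of the blob's bytes.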
+	Digest string `json:"digest,omitempty"`
+}
+
+// DockerImageConfig stores the image configuration
+type DockerImageConfig struct {
+	ID              string                 `json:"id"`
+	Parent          string                 `json:"parent,omitempty"`
+	Comment         string                 `json:"comment,omitempty"`
+	Created         time.Time              `json:"created"`
+	Container       string                 `json:"container,omitempty"`
+	ContainerConfig docker10.DockerConfig  `json:"container_config,omitempty"`
+	DockerVersion   string                 `json:"docker_version,omitempty"`
+	Author          string                 `json:"author,omitempty"`
+	Config          *docker10.DockerConfig `json:"config,omitempty"`
+	Architecture    string                 `json:"architecture,omitempty"`
+	Size            int64                  `json:"size,omitempty"`
+	RootFS          *DockerConfigRootFS    `json:"rootfs,omitempty"`
+	History         []DockerConfigHistory  `json:"history,omitempty"`
+	OS              string                 `json:"os,omitempty"`
+	OSVersion       string                 `json:"os.version,omitempty"`
+	OSFeatures      []string               `json:"os.features,omitempty"`
+}
+
+// DockerConfigHistory stores build commands that were used to create an image
+type DockerConfigHistory struct {
+	Created    time.Time `json:"created"`
+	Author     string    `json:"author,omitempty"`
+	CreatedBy  string    `json:"created_by,omitempty"`
+	Comment    string    `json:"comment,omitempty"`
+	EmptyLayer bool      `json:"empty_layer,omitempty"`
+}
+
+// DockerConfigRootFS describes an image's root filesystem
+type DockerConfigRootFS struct {
+	Type    string   `json:"type"`
+	DiffIDs []string `json:"diff_ids,omitempty"`
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go
new file mode 100644
index 00000000000..d35c052f37c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go
@@ -0,0 +1,379 @@
+package imageutil
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/blang/semver"
+
+	"github.com/openshift/api/image/docker10"
+	imagev1 "github.com/openshift/api/image/v1"
+	digestinternal "github.com/openshift/library-go/pkg/image/internal/digest"
+	imagereference "github.com/openshift/library-go/pkg/image/reference"
+)
+
+const (
+	// DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use.
+	DefaultImageTag = "latest"
+)
+
+var ParseDigest = digestinternal.ParseDigest
+
+// SplitImageStreamTag turns the name of an ImageStreamTag into Name and Tag.
+// It returns false if the tag was not properly specified in the name.
+func SplitImageStreamTag(nameAndTag string) (name string, tag string, ok bool) {
+	parts := strings.SplitN(nameAndTag, ":", 2)
+	name = parts[0]
+	if len(parts) > 1 {
+		tag = parts[1]
+	}
+	if len(tag) == 0 {
+		tag = DefaultImageTag
+	}
+	return name, tag, len(parts) == 2
+}
+
+// SplitImageStreamImage turns the name of an ImageStreamImage into Name and ID.
+// It returns false if the ID was not properly specified in the name.
+func SplitImageStreamImage(nameAndID string) (name string, id string, ok bool) {
+	parts := strings.SplitN(nameAndID, "@", 2)
+	name = parts[0]
+	if len(parts) > 1 {
+		id = parts[1]
+	}
+	return name, id, len(parts) == 2
+}
+
+// JoinImageStreamTag turns a name and tag into the name of an ImageStreamTag
+func JoinImageStreamTag(name, tag string) string {
+	if len(tag) == 0 {
+		tag = DefaultImageTag
+	}
+	return fmt.Sprintf("%s:%s", name, tag)
+}
+
+// JoinImageStreamImage creates the name of an ImageStreamImage object from an image stream name and an image ID.
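+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source):
+//
+//	name := JoinImageStreamImage("app", "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25")
+//	// name == "app@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25"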
+func JoinImageStreamImage(name, id string) string { + return fmt.Sprintf("%s@%s", name, id) +} + +// ParseImageStreamTagName splits a string into its name component and tag component, and returns an error +// if the string is not in the right form. +func ParseImageStreamTagName(istag string) (name string, tag string, err error) { + if strings.Contains(istag, "@") { + err = fmt.Errorf("%q is an image stream image, not an image stream tag", istag) + return + } + segments := strings.SplitN(istag, ":", 3) + switch len(segments) { + case 2: + name = segments[0] + tag = segments[1] + if len(name) == 0 || len(tag) == 0 { + err = fmt.Errorf("image stream tag name %q must have a name and a tag", istag) + } + default: + err = fmt.Errorf("expected exactly one : delimiter in the istag %q", istag) + } + return +} + +// ParseImageStreamImageName splits a string into its name component and ID component, and returns an error +// if the string is not in the right form. +func ParseImageStreamImageName(input string) (name string, id string, err error) { + segments := strings.SplitN(input, "@", 3) + switch len(segments) { + case 2: + name = segments[0] + id = segments[1] + if len(name) == 0 || len(id) == 0 { + err = fmt.Errorf("image stream image name %q must have a name and ID", input) + } + default: + err = fmt.Errorf("expected exactly one @ in the isimage name %q", input) + } + return +} + +var ( + reMinorSemantic = regexp.MustCompile(`^[\d]+\.[\d]+$`) + reMinorWithPatch = regexp.MustCompile(`^([\d]+\.[\d]+)-\w+$`) +) + +type tagPriority int + +const ( + // the "latest" tag + tagPriorityLatest tagPriority = iota + + // a semantic minor version ("5.1", "v5.1", "v5.1-rc1") + tagPriorityMinor + + // a full semantic version ("5.1.3-other", "v5.1.3-other") + tagPriorityFull + + // other tags + tagPriorityOther +) + +type prioritizedTag struct { + tag string + priority tagPriority + semver semver.Version + prefix string +} + +func prioritizeTag(tag string) prioritizedTag { + if tag == "latest" { + return prioritizedTag{ + tag: tag, + priority: tagPriorityLatest, + } + } + + short := tag + prefix := "" + if strings.HasPrefix(tag, "v") { + prefix = "v" + short = tag[1:] + } + + // 5.1.3 + if v, err := semver.Parse(short); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityFull, + semver: v, + prefix: prefix, + } + } + + // 5.1 + if reMinorSemantic.MatchString(short) { + if v, err := semver.Parse(short + ".0"); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityMinor, + semver: v, + prefix: prefix, + } + } + } + + // 5.1-rc1 + if match := reMinorWithPatch.FindStringSubmatch(short); match != nil { + if v, err := semver.Parse(strings.Replace(short, match[1], match[1]+".0", 1)); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityMinor, + semver: v, + prefix: prefix, + } + } + } + + // other + return prioritizedTag{ + tag: tag, + priority: tagPriorityOther, + prefix: prefix, + } +} + +type prioritizedTags []prioritizedTag + +func (t prioritizedTags) Len() int { return len(t) } +func (t prioritizedTags) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t prioritizedTags) Less(i, j int) bool { + if t[i].priority != t[j].priority { + return t[i].priority < t[j].priority + } + + if t[i].priority == tagPriorityOther { + return t[i].tag < t[j].tag + } + + cmp := t[i].semver.Compare(t[j].semver) + if cmp > 0 { // the newer tag has a higher priority + return true + } + return cmp == 0 && t[i].prefix < t[j].prefix +} + +// PrioritizeTags orders a set of image 
tags with a few conventions:
+//
+// 1. the "latest" tag, if present, should be first
+// 2. any tags that represent a semantic minor version ("5.1", "v5.1", "v5.1-rc1") should be next, in descending order
+// 3. any tags that represent a full semantic version ("5.1.3-other", "v5.1.3-other") should be next, in descending order
+// 4. any remaining tags should be sorted in lexicographic order
+//
+// The method updates the tags in place.
+func PrioritizeTags(tags []string) {
+	ptags := make(prioritizedTags, len(tags))
+	for i, tag := range tags {
+		ptags[i] = prioritizeTag(tag)
+	}
+	sort.Sort(ptags)
+	for i, pt := range ptags {
+		tags[i] = pt.tag
+	}
+}
+
+// StatusHasTag returns the named tag event list from the image stream's status
+// and a boolean indicating whether it was found.
+func StatusHasTag(stream *imagev1.ImageStream, name string) (imagev1.NamedTagEventList, bool) {
+	for _, tag := range stream.Status.Tags {
+		if tag.Tag == name {
+			return tag, true
+		}
+	}
+	return imagev1.NamedTagEventList{}, false
+}
+
+// LatestTaggedImage returns the most recent TagEvent for the specified image
+// repository and tag. Will resolve lookups for the empty tag. Returns nil
+// if tag isn't present in stream.status.tags.
+func LatestTaggedImage(stream *imagev1.ImageStream, tag string) *imagev1.TagEvent {
+	if len(tag) == 0 {
+		tag = imagev1.DefaultImageTag
+	}
+
+	// find the most recent tag event with an image reference
+	t, ok := StatusHasTag(stream, tag)
+	if ok {
+		if len(t.Items) == 0 {
+			return nil
+		}
+		return &t.Items[0]
+	}
+
+	return nil
+}
+
+// ImageWithMetadata mutates the given image. It parses raw DockerImageManifest data stored in the image and
+// fills its DockerImageMetadata and other fields.
+// Copied from github.com/openshift/image-registry/pkg/origin-common/util/util.go
+func ImageWithMetadata(image *imagev1.Image) error {
+	// Check if the metadata is already filled in for this image.
+	meta, hasMetadata := image.DockerImageMetadata.Object.(*docker10.DockerImage)
+	if hasMetadata && meta.Size > 0 {
+		return nil
+	}
+
+	version := image.DockerImageMetadataVersion
+	if len(version) == 0 {
+		version = "1.0"
+	}
+
+	obj := &docker10.DockerImage{}
+	if len(image.DockerImageMetadata.Raw) != 0 {
+		if err := json.Unmarshal(image.DockerImageMetadata.Raw, obj); err != nil {
+			return err
+		}
+		image.DockerImageMetadata.Object = obj
+	}
+
+	image.DockerImageMetadataVersion = version
+
+	return nil
+}
+
+// ImageWithMetadataOrDie calls ImageWithMetadata and panics on error.
+func ImageWithMetadataOrDie(image *imagev1.Image) {
+	if err := ImageWithMetadata(image); err != nil {
+		panic(err)
+	}
+}
+
+// ResolveLatestTaggedImage returns the appropriate pull spec for a given tag in
+// the image stream, handling the tag's reference policy if necessary to return
+// a resolved image. Callers that transform an ImageStreamTag into a pull spec
+// should use this method instead of LatestTaggedImage.
+func ResolveLatestTaggedImage(stream *imagev1.ImageStream, tag string) (string, bool) {
+	if len(tag) == 0 {
+		tag = imagev1.DefaultImageTag
+	}
+	return resolveTagReference(stream, tag, LatestTaggedImage(stream, tag))
+}
+
+// resolveTagReference applies the tag reference rules for a stream, tag, and tag event for
+// that tag. It returns true if the tag is valid.
+func resolveTagReference(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) (string, bool) {
+	if latest == nil {
+		return "", false
+	}
+	return resolveReferenceForTagEvent(stream, tag, latest), true
+}
+
+// SpecHasTag returns the named tag reference from the image stream's spec and
+// a boolean indicating whether it was found.
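+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source; "stream" is assumed to be an *imagev1.ImageStream):
+//
+//	if ref, ok := SpecHasTag(stream, "v5.1"); ok {
+//		fmt.Println(ref.Name, ref.ReferencePolicy.Type)
+//	}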
+func SpecHasTag(stream *imagev1.ImageStream, name string) (imagev1.TagReference, bool) {
+	for _, tag := range stream.Spec.Tags {
+		if tag.Name == name {
+			return tag, true
+		}
+	}
+	return imagev1.TagReference{}, false
+}
+
+// resolveReferenceForTagEvent applies the tag reference rules for a stream, tag, and tag event for
+// that tag.
+func resolveReferenceForTagEvent(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) string {
+	// retrieve spec policy - if not found, we use the latest spec
+	ref, ok := SpecHasTag(stream, tag)
+	if !ok {
+		return latest.DockerImageReference
+	}
+
+	switch ref.ReferencePolicy.Type {
+	// the local reference policy attempts to use image pull through on the integrated
+	// registry if possible
+	case imagev1.LocalTagReferencePolicy:
+		local := stream.Status.DockerImageRepository
+		if len(local) == 0 || len(latest.Image) == 0 {
+			// fall back to the originating reference if no local docker registry defined or we
+			// lack an image ID
+			return latest.DockerImageReference
+		}
+
+		// we must use imageapi's helper since we're calling Exact later on, which produces a string
+		ref, err := imagereference.Parse(local)
+		if err != nil {
+			// fall back to the originating reference if the reported local repository spec is not valid
+			return latest.DockerImageReference
+		}
+
+		// create a local pullthrough URL
+		ref.Tag = ""
+		ref.ID = latest.Image
+		return ref.Exact()
+
+	// the default policy is to use the originating image
+	default:
+		return latest.DockerImageReference
+	}
+}
+
+// DigestOrImageMatch matches the digest in the image name.
+func DigestOrImageMatch(image, imageID string) bool {
+	if d, err := ParseDigest(image); err == nil {
+		return strings.HasPrefix(d.Hex(), imageID) || strings.HasPrefix(image, imageID)
+	}
+	return strings.HasPrefix(image, imageID)
+}
+
+// ParseDockerImageReference parses a Docker pull spec string into a
+// DockerImageReference.
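+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source; the pull spec is hypothetical):
+//
+//	ref, err := ParseDockerImageReference("myregistry.io/myorg/app:v1")
+//	if err != nil {
+//		return err
+//	}
+//	// ref.Registry == "myregistry.io", ref.Namespace == "myorg",
+//	// ref.Name == "app", ref.Tag == "v1"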
+func ParseDockerImageReference(spec string) (imagev1.DockerImageReference, error) {
+	ref, err := imagereference.Parse(spec)
+	if err != nil {
+		return imagev1.DockerImageReference{}, err
+	}
+	return imagev1.DockerImageReference{
+		Registry:  ref.Registry,
+		Namespace: ref.Namespace,
+		Name:      ref.Name,
+		Tag:       ref.Tag,
+		ID:        ref.ID,
+	}, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go
new file mode 100644
index 00000000000..40ae2a0609c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go
@@ -0,0 +1,155 @@
+package imageutil
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestJoinImageStreamTag(t *testing.T) {
+	if e, a := "foo:bar", JoinImageStreamTag("foo", "bar"); e != a {
+		t.Errorf("Unexpected value: %s", a)
+	}
+	if e, a := "foo:"+DefaultImageTag, JoinImageStreamTag("foo", ""); e != a {
+		t.Errorf("Unexpected value: %s", a)
+	}
+}
+
+func TestParseImageStreamTagName(t *testing.T) {
+	tests := map[string]struct {
+		id           string
+		expectedName string
+		expectedTag  string
+		expectError  bool
+	}{
+		"empty id": {
+			id:          "",
+			expectError: true,
+		},
+		"missing colon": {
+			id:          "hello",
+			expectError: true,
+		},
+		"too many colons": {
+			id:          "a:b:c",
+			expectError: true,
+		},
+		"empty name": {
+			id:          ":tag",
+			expectError: true,
+		},
+		"empty tag": {
+			id:          "name",
+			expectError: true,
+		},
+		"happy path": {
+			id:           "name:tag",
+			expectError:  false,
+			expectedName: "name",
+			expectedTag:  "tag",
+		},
+	}
+
+	for description, testCase := range tests {
+		name, tag, err := ParseImageStreamTagName(testCase.id)
+		gotError := err != nil
+		if e, a := testCase.expectError, gotError; e != a {
+			t.Fatalf("%s: expected err: %t, got: %t: %s", description, e, a, err)
+		}
+		if err != nil {
+			continue
+		}
+		if e, a := testCase.expectedName, name; e != a {
+			t.Errorf("%s: name: expected %q, got %q", description, e, a)
+		}
+		if e, a := testCase.expectedTag, tag; e != a {
+			t.Errorf("%s: tag: expected %q, got %q", description, e, a)
+		}
+	}
+}
+
+func TestParseImageStreamImageName(t *testing.T) {
+	tests := map[string]struct {
+		input        string
+		expectedRepo string
+		expectedId   string
+		expectError  bool
+	}{
+		"empty string": {
+			input:       "",
+			expectError: true,
+		},
+		"one part": {
+			input:       "a",
+			expectError: true,
+		},
+		"more than 2 parts": {
+			input:       "a@b@c",
+			expectError: true,
+		},
+		"empty name part": {
+			input:       "@id",
+			expectError: true,
+		},
+		"empty id part": {
+			input:       "name@",
+			expectError: true,
+		},
+		"valid input": {
+			input:        "repo@id",
+			expectedRepo: "repo",
+			expectedId:   "id",
+			expectError:  false,
+		},
+	}
+
+	for name, test := range tests {
+		repo, id, err := ParseImageStreamImageName(test.input)
+		didError := err != nil
+		if e, a := test.expectError, didError; e != a {
+			t.Errorf("%s: expected error=%t, got=%t: %s", name, e, a, err)
+			continue
+		}
+		if test.expectError {
+			continue
+		}
+		if e, a := test.expectedRepo, repo; e != a {
+			t.Errorf("%s: repo: expected %q, got %q", name, e, a)
+			continue
+		}
+		if e, a := test.expectedId, id; e != a {
+			t.Errorf("%s: id: expected %q, got %q", name, e, a)
+			continue
+		}
+	}
+}
+
+func TestPrioritizeTags(t *testing.T) {
+	tests := []struct {
+		tags     []string
+		expected []string
+	}{
+		{
+			tags:     []string{"other", "latest", "v5.5", "5.2.3", "5.5", "v5.3.6-bother", "5.3.6-abba", "5.6"},
+			expected: []string{"latest", "5.6", "5.5", "v5.5", "v5.3.6-bother", "5.3.6-abba", "5.2.3", "other"},
+		},
+		{
+			tags:     []string{"1.1-beta1", "1.2-rc1", "1.1-rc1", "1.1-beta2", "1.2-beta1", "1.2-alpha1", "1.2-beta4", "latest"},
+			expected: []string{"latest", "1.2-rc1", "1.2-beta4", "1.2-beta1", "1.2-alpha1", "1.1-rc1", "1.1-beta2", "1.1-beta1"},
+		},
+		{
+			tags:     []string{"7.1", "v7.1", "7.1.0"},
+			expected: []string{"7.1", "v7.1", "7.1.0"},
+		},
+		{
+			tags:     []string{"7.1.0", "v7.1", "7.1"},
+			expected: []string{"7.1", "v7.1", "7.1.0"},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Log("sorting", tc.tags)
+		PrioritizeTags(tc.tags)
+		if !reflect.DeepEqual(tc.tags, tc.expected) {
+			t.Errorf("got %v, want %v", tc.tags, tc.expected)
+		}
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go
new file mode 100644
index 00000000000..a740c2d9a00
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digest.go
@@ -0,0 +1,138 @@
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+const (
+	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
+	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows us to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat returned when digest format invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength returned when digest has invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+	d := Digest(s)
+
+	return d, d.Validate()
+}
+
+// FromReader returns the digest of the underlying content using
+// the canonical digest algorithm.
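+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source):
+//
+//	d, err := FromReader(strings.NewReader(""))
+//	if err != nil {
+//		return err
+//	}
+//	// d == DigestSha256EmptyTar, since the canonical algorithm is sha256
+//	// and this is the sha256 of empty input.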
+func FromReader(rd io.Reader) (Digest, error) {
+	return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d is a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+	s := string(d)
+
+	if !DigestRegexpAnchored.MatchString(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	i := strings.Index(s, ":")
+	if i < 0 {
+		return ErrDigestInvalidFormat
+	}
+
+	// case: "sha256:" with no hex.
+	if i+1 == len(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	switch algorithm := Algorithm(s[:i]); algorithm {
+	case SHA256, SHA384, SHA512:
+		if algorithm.Size()*2 != len(s[i+1:]) {
+			return ErrDigestInvalidLength
+		}
+	default:
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+	return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+	return string(d)
+}
+
+func (d Digest) sepIndex() int {
+	i := strings.Index(string(d), ":")
+
+	if i < 0 {
+		panic("could not find ':' in digest: " + d)
+	}
+
+	return i
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go
new file mode 100644
index 00000000000..f3105a45b69
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/digester.go
@@ -0,0 +1,155 @@
+package digest
+
+import (
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+func (a Algorithm) String() string {
+	return string(a)
+}
+
+// Size returns number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+	h, ok := algorithms[a]
+	if !ok {
+		return 0
+	}
+	return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
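+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source; together with the String method above, *Algorithm satisfies the
+// standard library's flag.Value interface):
+//
+//	algorithm := Canonical
+//	flag.Var(&algorithm, "algorithm", "digest algorithm to use")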
+func (a *Algorithm) Set(value string) error {
+	if value == "" {
+		*a = Canonical
+	} else {
+		// just do a type conversion, support is queried with Available.
+		*a = Algorithm(value)
+	}
+
+	return nil
+}
+
+// New returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling New.
+func (a Algorithm) New() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import in the digest
+		// package to allow users to choose their hash implementation (such as
+		// when using stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+	}
+
+	return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.New()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.New()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+	Hash() hash.Hash // provides direct access to underlying hash instance.
+	Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+	alg  Algorithm
+	hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+	return d.hash
+}
+
+func (d *digester) Digest() Digest {
+	return NewDigest(d.alg, d.hash)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go
new file mode 100644
index 00000000000..6e6e4347ea4
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/digest/doc.go
@@ -0,0 +1,5 @@
+// Package digest is a copy of "github.com/docker/distribution/digest" that is kept because we want to avoid the godep,
+// this package has no non-standard dependencies, and if it changes lots of other docker registry stuff breaks.
+// Don't try this at home!
+// Changes here require sign-off from openshift/api-reviewers and they will be rejected.
+package digest
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go
new file mode 100644
index 00000000000..22188ea98f7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/doc.go
@@ -0,0 +1,5 @@
+// Package reference is a copy of "github.com/docker/distribution/reference" that is kept because we want to avoid the godep,
+// this package has no non-standard dependencies, and if it changes lots of other docker registry stuff breaks.
+// Don't try this at home!
+// Changes here require sign-off from openshift/api-reviewers and they will be rejected.
+package reference
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go
new file mode 100644
index 00000000000..eb498bc9d97
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/reference.go
@@ -0,0 +1,370 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+//	reference                  := name [ ":" tag ] [ "@" digest ]
+//	name                       := [hostname '/'] component ['/' component]*
+//	hostname                   := hostcomponent ['.' hostcomponent]* [':' port-number]
+//	hostcomponent              := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+//	port-number                := /[0-9]+/
+//	component                  := alpha-numeric [separator alpha-numeric]*
+//	alpha-numeric              := /[a-z0-9]+/
+//	separator                  := /[_.]|__|[-]*/
+//
+//	tag                        := /[\w][\w.-]{0,127}/
+//
+//	digest                     := digest-algorithm ":" digest-hex
+//	digest-algorithm           := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+//	digest-algorithm-separator := /[+.-_]/
+//	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+//	digest-hex                 := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/openshift/library-go/pkg/image/internal/digest"
+)
+
+const (
+	// NameTotalLengthMax is the maximum total number of characters in a repository name.
+	NameTotalLengthMax = 255
+)
+
+var (
+	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
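+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source): callers typically Parse a string and then type-assert for the
+// richer interfaces defined below:
+//
+//	ref, err := Parse("docker.io/library/busybox:latest")
+//	if err != nil {
+//		return err
+//	}
+//	if tagged, ok := ref.(Tagged); ok {
+//		fmt.Println(tagged.Tag()) // "latest"
+//	}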
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including hostname and digest
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+func SplitHostname(named Named) (string, string) {
+	name := named.Name()
+	match := anchoredNameRegexp.FindStringSubmatch(name)
+	if len(match) != 3 {
+		return "", name
+	}
+	return match[1], match[2]
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+	matches := ReferenceRegexp.FindStringSubmatch(s)
+	if matches == nil {
+		if s == "" {
+			return nil, ErrNameEmpty
+		}
+		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+			return nil, ErrNameContainsUppercase
+		}
+		return nil, ErrReferenceInvalidFormat
+	}
+
+	if len(matches[1]) > NameTotalLengthMax {
+		return nil, ErrNameTooLong
+	}
+
+	ref := reference{
+		name: matches[1],
+		tag:  matches[2],
+	}
+	if matches[3] != "" {
+		var err error
+		ref.digest, err = digest.ParseDigest(matches[3])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	r := getBestReferenceType(ref)
+	if r == nil {
+		return nil, ErrNameEmpty
+	}
+
+	return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name, otherwise an error is
+// returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
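+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source):
+//
+//	named, err := ParseNamed("docker.io/library/busybox:latest")
+//	if err != nil {
+//		return err
+//	}
+//	hostname, name := SplitHostname(named)
+//	// hostname == "docker.io", name == "library/busybox"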
+func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. 
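+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source):
+//
+//	named, _ := ParseNamed("docker.io/library/busybox:latest")
+//	trimmed := TrimNamed(named)
+//	// trimmed.String() == "docker.io/library/busybox"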
+func TrimNamed(ref Named) Named {
+	return repository(ref.Name())
+}
+
+func getBestReferenceType(ref reference) Reference {
+	if ref.name == "" {
+		// Allow digest only references
+		if ref.digest != "" {
+			return digestReference(ref.digest)
+		}
+		return nil
+	}
+	if ref.tag == "" {
+		if ref.digest != "" {
+			return canonicalReference{
+				name:   ref.name,
+				digest: ref.digest,
+			}
+		}
+		return repository(ref.name)
+	}
+	if ref.digest == "" {
+		return taggedReference{
+			name: ref.name,
+			tag:  ref.tag,
+		}
+	}
+
+	return ref
+}
+
+type reference struct {
+	name   string
+	tag    string
+	digest digest.Digest
+}
+
+func (r reference) String() string {
+	return r.name + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Name() string {
+	return r.name
+}
+
+func (r reference) Tag() string {
+	return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+	return r.digest
+}
+
+type repository string
+
+func (r repository) String() string {
+	return string(r)
+}
+
+func (r repository) Name() string {
+	return string(r)
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+	return string(d)
+}
+
+func (d digestReference) Digest() digest.Digest {
+	return digest.Digest(d)
+}
+
+type taggedReference struct {
+	name string
+	tag  string
+}
+
+func (t taggedReference) String() string {
+	return t.name + ":" + t.tag
+}
+
+func (t taggedReference) Name() string {
+	return t.name
+}
+
+func (t taggedReference) Tag() string {
+	return t.tag
+}
+
+type canonicalReference struct {
+	name   string
+	digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+	return c.name + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Name() string {
+	return c.name
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+	return c.digest
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go
new file mode 100644
index 00000000000..9a7d366bc8a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/internal/reference/regexp.go
@@ -0,0 +1,124 @@
+package reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and
+	// multiple dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// hostnameComponentRegexp restricts the registry hostname component of a
+	// repository name to start with a component as defined by hostnameRegexp
+	// and followed by an optional port.
+	hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// hostnameRegexp defines the structure of potential hostname components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+ hostnameRegexp = expression( + hostnameComponentRegexp, + optional(repeated(literal(`.`), hostnameComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the hostname and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(hostnameRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // hostname and trailing components. + anchoredNameRegexp = anchored( + optional(capture(hostnameRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. 
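+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source):
+//
+//	re := anchored(TagRegexp)
+//	// re.MatchString("latest") == true
+//	// re.MatchString("bad tag") == false, because once the ^ and $
+//	// delimiters are added the match must span the whole string.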
+func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go b/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go new file mode 100644 index 00000000000..ee47ff92a9c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/reference/reference.go @@ -0,0 +1,245 @@ +package reference + +import ( + "net" + "net/url" + "strings" + + "github.com/openshift/library-go/pkg/image/internal/digest" + "github.com/openshift/library-go/pkg/image/internal/reference" +) + +// DockerImageReference points to a Docker image. +type DockerImageReference struct { + Registry string + Namespace string + Name string + Tag string + ID string +} + +const ( + // DockerDefaultRegistry is the value for the registry when none was provided. + DockerDefaultRegistry = "docker.io" + // DockerDefaultV1Registry is the host name of the default v1 registry + DockerDefaultV1Registry = "index." + DockerDefaultRegistry + // DockerDefaultV2Registry is the host name of the default v2 registry + DockerDefaultV2Registry = "registry-1." + DockerDefaultRegistry +) + +// Parse parses a Docker pull spec string into a +// DockerImageReference. +func Parse(spec string) (DockerImageReference, error) { + var ref DockerImageReference + + namedRef, err := reference.ParseNamed(spec) + if err != nil { + return ref, err + } + + name := namedRef.Name() + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ":.") && name[:i] != "localhost") { + ref.Name = name + } else { + ref.Registry, ref.Name = name[:i], name[i+1:] + } + + if named, ok := namedRef.(reference.NamedTagged); ok { + ref.Tag = named.Tag() + } + + if named, ok := namedRef.(reference.Canonical); ok { + ref.ID = named.Digest().String() + } + + // It's not enough just to use the reference.ParseNamed(). We have to fill + // ref.Namespace from ref.Name + if i := strings.IndexRune(ref.Name, '/'); i != -1 { + ref.Namespace, ref.Name = ref.Name[:i], ref.Name[i+1:] + } + + return ref, nil +} + +// Equal returns true if the other DockerImageReference is equivalent to the +// reference r. The comparison applies defaults to the Docker image reference, +// so that e.g., "foobar" equals "docker.io/library/foobar:latest". +func (r DockerImageReference) Equal(other DockerImageReference) bool { + defaultedRef := r.DockerClientDefaults() + otherDefaultedRef := other.DockerClientDefaults() + return defaultedRef == otherDefaultedRef +} + +// DockerClientDefaults sets the default values used by the Docker client. +func (r DockerImageReference) DockerClientDefaults() DockerImageReference { + if len(r.Registry) == 0 { + r.Registry = DockerDefaultRegistry + } + if len(r.Namespace) == 0 && IsRegistryDockerHub(r.Registry) { + r.Namespace = "library" + } + if len(r.Tag) == 0 { + r.Tag = "latest" + } + return r +} + +// Minimal reduces a DockerImageReference to its minimalist form. +func (r DockerImageReference) Minimal() DockerImageReference { + if r.Tag == "latest" { + r.Tag = "" + } + return r +} + +// AsRepository returns the reference without tags or IDs. +func (r DockerImageReference) AsRepository() DockerImageReference { + r.Tag = "" + r.ID = "" + return r +} + +// RepositoryName returns the registry relative name +func (r DockerImageReference) RepositoryName() string { + r.Tag = "" + r.ID = "" + r.Registry = "" + return r.Exact() +} + +// RegistryHostPort returns the registry hostname and the port. 
+// If the port is not specified in the registry hostname then we default to
+// port 443, or to port 80 when insecure is true.
+// This will also default to Docker client defaults if the registry hostname is empty.
+func (r DockerImageReference) RegistryHostPort(insecure bool) (string, string) {
+	registryHost := r.AsV2().DockerClientDefaults().Registry
+	if strings.Contains(registryHost, ":") {
+		hostname, port, _ := net.SplitHostPort(registryHost)
+		return hostname, port
+	}
+	if insecure {
+		return registryHost, "80"
+	}
+	return registryHost, "443"
+}
+
+// RegistryURL returns the registry of the reference as an https URL.
+func (r DockerImageReference) RegistryURL() *url.URL {
+	return &url.URL{
+		Scheme: "https",
+		Host:   r.AsV2().Registry,
+	}
+}
+
+// DaemonMinimal clears defaults that Docker assumes.
+func (r DockerImageReference) DaemonMinimal() DockerImageReference {
+	switch r.Registry {
+	case DockerDefaultV1Registry, DockerDefaultV2Registry:
+		r.Registry = DockerDefaultRegistry
+	}
+	if IsRegistryDockerHub(r.Registry) && r.Namespace == "library" {
+		r.Namespace = ""
+	}
+	return r.Minimal()
+}
+
+// AsV2 maps the default Docker registry hosts to the default v2 registry host.
+func (r DockerImageReference) AsV2() DockerImageReference {
+	switch r.Registry {
+	case DockerDefaultV1Registry, DockerDefaultRegistry:
+		r.Registry = DockerDefaultV2Registry
+	}
+	return r
+}
+
+// MostSpecific returns the most specific image reference that can be constructed from the
+// current ref, preferring an ID over a Tag. Allows client code dealing with both tags and IDs
+// to get the most specific reference easily.
+func (r DockerImageReference) MostSpecific() DockerImageReference {
+	if len(r.ID) == 0 {
+		return r
+	}
+	if _, err := digest.ParseDigest(r.ID); err == nil {
+		r.Tag = ""
+		return r
+	}
+	if len(r.Tag) == 0 {
+		r.Tag, r.ID = r.ID, ""
+		return r
+	}
+	return r
+}
+
+// NameString returns the name of the reference with its tag or ID.
+func (r DockerImageReference) NameString() string {
+	switch {
+	case len(r.Name) == 0:
+		return ""
+	case len(r.ID) > 0:
+		var ref string
+		if _, err := digest.ParseDigest(r.ID); err == nil {
+			// if it parses as a digest, it's a v2 pull by id
+			ref = "@" + r.ID
+		} else {
+			// if it doesn't parse as a digest, it's presumably a v1 registry by-id tag
+			ref = ":" + r.ID
+		}
+		return r.Name + ref
+	case len(r.Tag) > 0:
+		return r.Name + ":" + r.Tag
+	default:
+		return r.Name
+	}
+}
+
+// Exact returns a string representation of the set fields on the DockerImageReference
+func (r DockerImageReference) Exact() string {
+	name := r.NameString()
+	if len(name) == 0 {
+		return name
+	}
+	s := r.Registry
+	if len(s) > 0 {
+		s += "/"
+	}
+
+	if len(r.Namespace) != 0 {
+		s += r.Namespace + "/"
+	}
+	return s + name
+}
+
+// String converts a DockerImageReference to a Docker pull spec (which implies a default namespace
+// according to V1 Docker registry rules). Use Exact() if you want no defaulting.
+func (r DockerImageReference) String() string {
+	if len(r.Namespace) == 0 && IsRegistryDockerHub(r.Registry) {
+		r.Namespace = "library"
+	}
+	return r.Exact()
+}
+
+// IsRegistryDockerHub returns true if the given registry name belongs to
+// Docker hub.
+func IsRegistryDockerHub(registry string) bool {
+	switch registry {
+	case DockerDefaultRegistry, DockerDefaultV1Registry, DockerDefaultV2Registry:
+		return true
+	default:
+		return false
+	}
+}
+
+// DeepCopyInto is a deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy copies the receiver, creating a new DockerImageReference.
+func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/reference/reference_test.go b/vendor/github.com/openshift/library-go/pkg/image/reference/reference_test.go new file mode 100644 index 00000000000..b217b2aa6db --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/reference/reference_test.go @@ -0,0 +1,262 @@ +package reference + +import ( + "fmt" + "strings" + "testing" +) + +func TestParse(t *testing.T) { + testCases := []struct { + From string + Registry, Namespace, Name, Tag, ID string + Err bool + }{ + { + From: "foo", + Name: "foo", + }, + { + From: "foo:tag", + Name: "foo", + Tag: "tag", + }, + { + From: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Name: "sha256", + Tag: "3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + }, + { + From: "foo@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Name: "foo", + ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + }, + { + From: "bar/foo", + Namespace: "bar", + Name: "foo", + }, + { + From: "bar/foo:tag", + Namespace: "bar", + Name: "foo", + Tag: "tag", + }, + { + From: "bar/foo@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Namespace: "bar", + Name: "foo", + ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + }, + { + From: "bar/foo/baz", + Namespace: "bar", + Name: "foo/baz", + }, + { + From: "bar/library/baz", + Namespace: "bar", + Name: "library/baz", + }, + { + From: "bar/foo/baz:tag", + Namespace: "bar", + Name: "foo/baz", + Tag: "tag", + }, + { + From: "bar/foo/baz@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Namespace: "bar", + Name: "foo/baz", + ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + }, + { + From: "bar:5000/foo/baz", + Registry: "bar:5000", + Namespace: "foo", + Name: "baz", + }, + { + From: "bar:5000/library/baz", + Registry: "bar:5000", + Namespace: "library", + Name: "baz", + }, + { + From: "bar:5000/baz", + Registry: "bar:5000", + Name: "baz", + }, + { + From: "bar:5000/foo/baz:tag", + Registry: "bar:5000", + Namespace: "foo", + Name: "baz", + Tag: "tag", + }, + { + From: "bar:5000/foo/baz@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Registry: "bar:5000", + Namespace: "foo", + Name: "baz", + ID: "sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + }, + { + From: "myregistry.io/foo", + Registry: "myregistry.io", + Name: "foo", + }, + { + From: "localhost/bar", + Registry: "localhost", + Name: "bar", + }, + { + From: "docker.io/library/myapp", + Registry: "docker.io", + Namespace: "library", + Name: "myapp", + }, + { + From: "docker.io/myapp", + Registry: "docker.io", + Name: "myapp", + }, + { + From: "docker.io/user/myapp", + Registry: "docker.io", + Namespace: "user", + Name: "myapp", + }, + { + From: "docker.io/user/project/myapp", + Registry: "docker.io", + Namespace: "user", + Name: "project/myapp", + }, + { + From: "index.docker.io/bar", + Registry: "index.docker.io", + Name: "bar", + }, + { + // registry/namespace/name == 255 chars + From: fmt.Sprintf("bar:5000/%s/%s:tag", strings.Repeat("a", 63), strings.Repeat("b", 182)), + Registry: "bar:5000", + Namespace: strings.Repeat("a", 63), + Name: strings.Repeat("b", 182), + Tag: "tag", + }, + 
{ + // docker.io/namespace/name == 255 chars with explicit namespace + From: fmt.Sprintf("docker.io/library/%s:tag", strings.Repeat("b", 231)), + Registry: "docker.io", + Namespace: "library", + Name: strings.Repeat("b", 231), + Tag: "tag", + }, + { + // docker.io/namespace/name == 255 chars with implicit namespace + From: fmt.Sprintf("docker.io/%s:tag", strings.Repeat("b", 231)), + Registry: "docker.io", + Name: strings.Repeat("b", 231), + Tag: "tag", + }, + { + // registry/namespace/name > 255 chars + From: fmt.Sprintf("bar:5000/%s/%s:tag", strings.Repeat("a", 63), strings.Repeat("b", 183)), + Err: true, + }, + { + // docker.io/name > 255 chars with implicit namespace + From: fmt.Sprintf("docker.io/%s:tag", strings.Repeat("b", 246)), + Err: true, + }, + { + From: "registry.io/foo/bar/:Tag", + Err: true, + }, + { + From: "https://bar:5000/foo/baz", + Err: true, + }, + { + From: "http://bar:5000/foo/baz@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Err: true, + }, + { + From: "registry:3000/integration/imageStream:success", + Err: true, + }, + { + From: "registry:5000/integration/test-image-stream@sha256:00000000000000000000000000000001", + Err: true, + }, + { + From: "abc@badid", + Err: true, + }, + { + From: "index.docker.io/mysql@sha256:bad", + Err: true, + }, + { + From: "@sha256:3c87c572822935df60f0f5d3665bd376841a7fcfeb806b5f212de6a00e9a7b25", + Err: true, + }, + { + From: ":tag", + Err: true, + }, + { + From: "bar/foo/baz/biz", + Namespace: "bar", + Name: "foo/baz/biz", + }, + { + From: "bar/foo/baz////biz", + Err: true, + }, + { + From: "//foo/baz/biz", + Err: true, + }, + { + From: "ftp://baz/baz/biz", + Err: true, + }, + { + From: "", + Err: true, + }, + } + + for _, testCase := range testCases { + ref, err := Parse(testCase.From) + switch { + case err != nil && !testCase.Err: + t.Errorf("%s: unexpected error: %v", testCase.From, err) + continue + case err == nil && testCase.Err: + t.Errorf("%s: unexpected non-error: %#+v", testCase.From, ref) + continue + case err != nil && testCase.Err: + continue + } + if e, a := testCase.Registry, ref.Registry; e != a { + t.Errorf("%s: registry: expected %q, got %q", testCase.From, e, a) + } + if e, a := testCase.Namespace, ref.Namespace; e != a { + t.Errorf("%s: namespace: expected %q, got %q", testCase.From, e, a) + } + if e, a := testCase.Name, ref.Name; e != a { + t.Errorf("%s: name: expected %q, got %q", testCase.From, e, a) + } + if e, a := testCase.Tag, ref.Tag; e != a { + t.Errorf("%s: tag: expected %q, got %q", testCase.From, e, a) + } + if e, a := testCase.ID, ref.ID; e != a { + t.Errorf("%s: id: expected %q, got %q", testCase.From, e, a) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go new file mode 100644 index 00000000000..663aa96091d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go @@ -0,0 +1,120 @@ +package referencemutator + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + + buildv1 "github.com/openshift/api/build/v1" +) + +type buildSpecMutator struct { + spec *buildv1.CommonSpec + oldSpec *buildv1.CommonSpec + path *field.Path + output bool +} + +// NewBuildMutator returns an ImageReferenceMutator that includes the output field. 
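+//
+// Illustrative example (editor's sketch, not part of the vendored upstream
+// source; "build" is assumed to be a *buildv1.Build, the mutate callback
+// matches the func(*corev1.ObjectReference) error shape used in the tests
+// below, and rewritePullSpec is a hypothetical helper):
+//
+//	errs := NewBuildMutator(build).Mutate(func(ref *corev1.ObjectReference) error {
+//		if ref.Kind == "DockerImage" {
+//			ref.Name = rewritePullSpec(ref.Name)
+//		}
+//		return nil
+//	})
+//	if len(errs) > 0 {
+//		return errs.ToAggregate()
+//	}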
+func NewBuildMutator(build *buildv1.Build) ImageReferenceMutator { + return &buildSpecMutator{ + spec: &build.Spec.CommonSpec, + path: field.NewPath("spec"), + output: true, + } +} + +func hasIdenticalImageSourceObjectReference(spec *buildv1.CommonSpec, ref corev1.ObjectReference) bool { + if spec == nil { + return false + } + for i := range spec.Source.Images { + if spec.Source.Images[i].From == ref { + return true + } + } + return false +} + +func hasIdenticalStrategyFrom(spec, oldSpec *buildv1.CommonSpec) bool { + if oldSpec == nil { + return false + } + switch { + case spec.Strategy.CustomStrategy != nil: + if oldSpec.Strategy.CustomStrategy != nil { + return spec.Strategy.CustomStrategy.From == oldSpec.Strategy.CustomStrategy.From + } + case spec.Strategy.DockerStrategy != nil: + if oldSpec.Strategy.DockerStrategy != nil { + return hasIdenticalObjectReference(spec.Strategy.DockerStrategy.From, oldSpec.Strategy.DockerStrategy.From) + } + case spec.Strategy.SourceStrategy != nil: + if oldSpec.Strategy.SourceStrategy != nil { + return spec.Strategy.SourceStrategy.From == oldSpec.Strategy.SourceStrategy.From + } + } + return false +} + +func hasIdenticalObjectReference(ref, oldRef *corev1.ObjectReference) bool { + if ref == nil || oldRef == nil { + return false + } + return *ref == *oldRef +} + +func (m *buildSpecMutator) Mutate(fn ImageReferenceMutateFunc) field.ErrorList { + var errs field.ErrorList + for i := range m.spec.Source.Images { + if hasIdenticalImageSourceObjectReference(m.oldSpec, m.spec.Source.Images[i].From) { + continue + } + if err := fn(&m.spec.Source.Images[i].From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("source", "images").Index(i).Child("from", "name"))) + continue + } + } + if !hasIdenticalStrategyFrom(m.spec, m.oldSpec) { + if s := m.spec.Strategy.CustomStrategy; s != nil { + if err := fn(&s.From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("strategy", "customStrategy", "from", "name"))) + } + } + if s := m.spec.Strategy.DockerStrategy; s != nil { + if s.From != nil { + if err := fn(s.From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("strategy", "dockerStrategy", "from", "name"))) + } + } + } + if s := m.spec.Strategy.SourceStrategy; s != nil { + if err := fn(&s.From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("strategy", "sourceStrategy", "from", "name"))) + } + } + } + if m.output { + if s := m.spec.Output.To; s != nil { + if m.oldSpec == nil || m.oldSpec.Output.To == nil || !hasIdenticalObjectReference(s, m.oldSpec.Output.To) { + if err := fn(s); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("output", "to"))) + } + } + } + } + return errs +} + +func fieldErrorOrInternal(err error, path *field.Path) *field.Error { + if ferr, ok := err.(*field.Error); ok { + if len(ferr.Field) == 0 { + ferr.Field = path.String() + } + return ferr + } + if errors.IsNotFound(err) { + return field.NotFound(path, err) + } + return field.InternalError(path, err) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go new file mode 100644 index 00000000000..a40828652f8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go @@ -0,0 +1,289 @@ +package referencemutator + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + 
"k8s.io/apimachinery/pkg/util/validation/field" + + buildv1 "github.com/openshift/api/build/v1" +) + +func imageRef(name string) *corev1.ObjectReference { + ref := imageRefValue(name) + return &ref +} +func imageRefValue(name string) corev1.ObjectReference { + return corev1.ObjectReference{Kind: "DockerImage", Name: name} +} + +func Test_buildSpecMutator_Mutate(t *testing.T) { + type fields struct { + spec *buildv1.CommonSpec + oldSpec *buildv1.CommonSpec + path *field.Path + output bool + } + type args struct { + fn ImageReferenceMutateFunc + } + tests := []struct { + name string + fields fields + args args + want field.ErrorList + wantSpec *buildv1.CommonSpec + }{ + { + name: "no-op", + fields: fields{spec: &buildv1.CommonSpec{}}, + }, + { + name: "passes reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if !reflect.DeepEqual(ref, imageRef("test")) { + t.Errorf("unexpected ref: %#v", ref) + } + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + }, + { + name: "mutates docker reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + ref.Name = "test-2" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test-2")}, + }, + }, + }, + { + name: "mutates source reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + ref.Name = "test-2" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test-2")}, + }, + }, + }, + { + name: "mutates custom reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + ref.Name = "test-2" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test-2")}, + }, + }, + }, + { + name: "mutates image source references", + fields: fields{spec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-2")}, + {From: imageRefValue("test-3")}, + }}, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name == "test-2" { + ref.Name = "test-4" + } + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-4")}, + {From: imageRefValue("test-3")}, + }}, + }, + }, + { + name: "mutates only changed references", + fields: fields{ + spec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-2")}, + {From: imageRefValue("test-3")}, + }}, + }, + 
oldSpec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-3")}, + }}, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name != "test-2" { + t.Errorf("did not expect to be called for existing reference") + } + ref.Name = "test-4" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-4")}, + {From: imageRefValue("test-3")}, + }}, + }, + }, + { + name: "skips when docker reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + }, + { + name: "skips when custom reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + { + name: "skips when source reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + { + name: "skips when source reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{ + From: imageRefValue("test"), + }, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{ + From: imageRefValue("test"), + }, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{ + From: imageRefValue("test"), + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &buildSpecMutator{ + spec: tt.fields.spec, + oldSpec: tt.fields.oldSpec, + path: tt.fields.path, + output: tt.fields.output, + } + if tt.wantSpec == nil { + tt.wantSpec = &buildv1.CommonSpec{} + } + if got := 
m.Mutate(tt.args.fn); !reflect.DeepEqual(got, tt.want) { + t.Errorf("buildSpecMutator.Mutate() = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(tt.wantSpec, tt.fields.spec) { + t.Errorf("buildSpecMutator.Mutate() spec = %#v, want %#v", tt.fields.spec, tt.wantSpec) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go new file mode 100644 index 00000000000..effde51656b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go @@ -0,0 +1,109 @@ +package referencemutator + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + + buildv1 "github.com/openshift/api/build/v1" +) + +// ImageReferenceMutateFunc is passed a reference representing an image, and may alter +// the Name, Kind, and Namespace fields of the reference. If an error is returned the +// object may still be mutated under the covers. +type ImageReferenceMutateFunc func(ref *corev1.ObjectReference) error + +type ImageReferenceMutator interface { + // Mutate invokes fn on every image reference in the object. If fn returns an error, + // a field.Error is added to the list to be returned. Mutate does not terminate early + // if errors are detected. + Mutate(fn ImageReferenceMutateFunc) field.ErrorList +} + +var errNoImageMutator = fmt.Errorf("no list of images available for this object") + +// GetImageReferenceMutator returns a mutator for the provided object, or an error if no +// such mutator is defined. Only references that are different between obj and old will +// be returned unless old is nil. +func GetImageReferenceMutator(obj, old runtime.Object) (ImageReferenceMutator, error) { + switch t := obj.(type) { + case *buildv1.Build: + if oldT, ok := old.(*buildv1.Build); ok && oldT != nil { + return &buildSpecMutator{spec: &t.Spec.CommonSpec, oldSpec: &oldT.Spec.CommonSpec, path: field.NewPath("spec")}, nil + } + return &buildSpecMutator{spec: &t.Spec.CommonSpec, path: field.NewPath("spec")}, nil + case *buildv1.BuildConfig: + if oldT, ok := old.(*buildv1.BuildConfig); ok && oldT != nil { + return &buildSpecMutator{spec: &t.Spec.CommonSpec, oldSpec: &oldT.Spec.CommonSpec, path: field.NewPath("spec")}, nil + } + return &buildSpecMutator{spec: &t.Spec.CommonSpec, path: field.NewPath("spec")}, nil + default: + if spec, path, err := GetPodSpecV1(obj); err == nil { + if old == nil { + return &podSpecV1Mutator{spec: spec, path: path}, nil + } + oldSpec, _, err := GetPodSpecV1(old) + if err != nil { + return nil, fmt.Errorf("old and new pod spec objects were not of the same type %T != %T: %v", obj, old, err) + } + return &podSpecV1Mutator{spec: spec, oldSpec: oldSpec, path: path}, nil + } + return nil, errNoImageMutator + } +} + +type AnnotationAccessor interface { + // Annotations returns a map representing annotations. Not mutable. + Annotations() map[string]string + // SetAnnotations sets representing annotations onto the object. + SetAnnotations(map[string]string) + // TemplateAnnotations returns a map representing annotations on a nested template in the object. Not mutable. + // If no template is present bool will be false. + TemplateAnnotations() (map[string]string, bool) + // SetTemplateAnnotations sets annotations on a nested template in the object. + // If no template is present bool will be false. 
+ SetTemplateAnnotations(map[string]string) bool +} + +type annotationsAccessor struct { + object metav1.Object + template metav1.Object +} + +func (a annotationsAccessor) Annotations() map[string]string { + return a.object.GetAnnotations() +} + +func (a annotationsAccessor) TemplateAnnotations() (map[string]string, bool) { + if a.template == nil { + return nil, false + } + return a.template.GetAnnotations(), true +} + +func (a annotationsAccessor) SetAnnotations(annotations map[string]string) { + a.object.SetAnnotations(annotations) +} + +func (a annotationsAccessor) SetTemplateAnnotations(annotations map[string]string) bool { + if a.template == nil { + return false + } + a.template.SetAnnotations(annotations) + return true +} + +// GetAnnotationAccessor returns an accessor for the provided object or false if the object +// does not support accessing annotations. +func GetAnnotationAccessor(obj runtime.Object) (AnnotationAccessor, bool) { + switch t := obj.(type) { + case metav1.Object: + templateObject, _ := GetTemplateMetaObject(obj) + return annotationsAccessor{object: t, template: templateObject}, true + default: + return nil, false + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go new file mode 100644 index 00000000000..28c29378d92 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go @@ -0,0 +1,302 @@ +package referencemutator + +import ( + "fmt" + + kappsv1 "k8s.io/api/apps/v1" + kappsv1beta1 "k8s.io/api/apps/v1beta1" + kappsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + + appsv1 "github.com/openshift/api/apps/v1" + securityv1 "github.com/openshift/api/security/v1" +) + +type ContainerMutator interface { + GetName() string + GetImage() string + SetImage(image string) +} + +type PodSpecReferenceMutator interface { + GetContainerByIndex(init bool, i int) (ContainerMutator, bool) + GetContainerByName(name string) (ContainerMutator, bool) + Path() *field.Path +} + +// GetPodSpecReferenceMutator returns a mutator for the provided object, or an error if no +// such mutator is defined. +func GetPodSpecReferenceMutator(obj runtime.Object) (PodSpecReferenceMutator, error) { + if spec, path, err := GetPodSpecV1(obj); err == nil { + return &podSpecV1Mutator{spec: spec, path: path}, nil + } + return nil, errNoImageMutator +} + +var errNoPodSpec = fmt.Errorf("No PodSpec available for this object") + +// GetPodSpecV1 returns a mutable pod spec out of the provided object, including a field path +// to the field in the object, or an error if the object does not contain a pod spec. +// This only returns pod specs for v1 compatible objects. 
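+//
+// For example (hypothetical caller), a Deployment yields its template's spec:
+//
+//	spec, path, err := GetPodSpecV1(deployment)
+//	// err == nil, path.String() == "spec.template.spec", and spec points
+//	// into the deployment, so edits made through spec mutate it in place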
+func GetPodSpecV1(obj runtime.Object) (*corev1.PodSpec, *field.Path, error) { + switch r := obj.(type) { + + case *corev1.Pod: + return &r.Spec, field.NewPath("spec"), nil + + case *corev1.PodTemplate: + return &r.Template.Spec, field.NewPath("template", "spec"), nil + + case *corev1.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + + case *extensionsv1beta1.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *extensionsv1beta1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *extensionsv1beta1.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *batchv1.Job: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *batchv2alpha1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + case *batchv1beta1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + + case *batchv2alpha1.JobTemplate: + return &r.Template.Spec.Template.Spec, field.NewPath("template", "spec", "template", "spec"), nil + case *batchv1beta1.JobTemplate: + return &r.Template.Spec.Template.Spec, field.NewPath("template", "spec", "template", "spec"), nil + + case *kappsv1.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta1.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *securityv1.PodSecurityPolicySubjectReview: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *securityv1.PodSecurityPolicySelfSubjectReview: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *securityv1.PodSecurityPolicyReview: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *appsv1.DeploymentConfig: + if r.Spec.Template != nil { + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + } + return nil, nil, errNoPodSpec +} + +// GetTemplateMetaObject returns a mutable metav1.Object interface for the template +// the object contains, or false if no such object is available. 
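+//
+// Sketch (obj is a hypothetical workload object): the returned metav1.Object
+// addresses the template in place, so annotating it mutates the enclosing object:
+//
+//	if meta, ok := GetTemplateMetaObject(obj); ok {
+//		a := meta.GetAnnotations()
+//		// mutate a as needed, then:
+//		meta.SetAnnotations(a)
+//	}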
+func GetTemplateMetaObject(obj runtime.Object) (metav1.Object, bool) { + switch r := obj.(type) { + + case *corev1.PodTemplate: + return &r.Template.ObjectMeta, true + + case *corev1.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.ObjectMeta, true + } + + case *extensionsv1beta1.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + + case *extensionsv1beta1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.Deployment: + return &r.Spec.Template.ObjectMeta, true + + case *extensionsv1beta1.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + + case *batchv1.Job: + return &r.Spec.Template.ObjectMeta, true + + case *batchv2alpha1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + case *batchv1beta1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + + case *batchv2alpha1.JobTemplate: + return &r.Template.Spec.Template.ObjectMeta, true + case *batchv1beta1.JobTemplate: + return &r.Template.Spec.Template.ObjectMeta, true + + case *kappsv1.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta1.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + + case *securityv1.PodSecurityPolicySubjectReview: + return &r.Spec.Template.ObjectMeta, true + + case *securityv1.PodSecurityPolicySelfSubjectReview: + return &r.Spec.Template.ObjectMeta, true + + case *securityv1.PodSecurityPolicyReview: + return &r.Spec.Template.ObjectMeta, true + + case *appsv1.DeploymentConfig: + if r.Spec.Template != nil { + return &r.Spec.Template.ObjectMeta, true + } + } + return nil, false +} + +type containerV1Mutator struct { + *corev1.Container +} + +func (m containerV1Mutator) GetName() string { return m.Name } +func (m containerV1Mutator) GetImage() string { return m.Image } +func (m containerV1Mutator) SetImage(image string) { m.Image = image } + +// podSpecV1Mutator implements the mutation interface over objects with a pod spec. +type podSpecV1Mutator struct { + spec *corev1.PodSpec + oldSpec *corev1.PodSpec + path *field.Path +} + +func (m *podSpecV1Mutator) Path() *field.Path { + return m.path +} + +func hasIdenticalPodSpecV1Image(spec *corev1.PodSpec, containerName, image string) bool { + if spec == nil { + return false + } + for i := range spec.InitContainers { + if spec.InitContainers[i].Name == containerName { + return spec.InitContainers[i].Image == image + } + } + for i := range spec.Containers { + if spec.Containers[i].Name == containerName { + return spec.Containers[i].Image == image + } + } + return false +} + +// Mutate applies fn to all containers and init containers. If fn changes the Kind to +// any value other than "DockerImage", an error is set on that field. 
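+//
+// Sketch of a conforming fn (the digest below is a hypothetical value): the
+// mutator wraps each container image string in a DockerImage ObjectReference,
+// so fn may rewrite ref.Name but must leave ref.Kind untouched:
+//
+//	fn := func(ref *corev1.ObjectReference) error {
+//		ref.Name = "example.com/app@sha256:..." // resolved pin; Kind stays "DockerImage"
+//		return nil
+//	}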
+func (m *podSpecV1Mutator) Mutate(fn ImageReferenceMutateFunc) field.ErrorList { + var errs field.ErrorList + for i := range m.spec.InitContainers { + container := &m.spec.InitContainers[i] + if hasIdenticalPodSpecV1Image(m.oldSpec, container.Name, container.Image) { + continue + } + ref := corev1.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, fieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + for i := range m.spec.Containers { + container := &m.spec.Containers[i] + if hasIdenticalPodSpecV1Image(m.oldSpec, container.Name, container.Image) { + continue + } + ref := corev1.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("containers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, fieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("containers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + return errs +} + +func (m *podSpecV1Mutator) GetContainerByName(name string) (ContainerMutator, bool) { + spec := m.spec + for i := range spec.InitContainers { + if name != spec.InitContainers[i].Name { + continue + } + return containerV1Mutator{&spec.InitContainers[i]}, true + } + for i := range spec.Containers { + if name != spec.Containers[i].Name { + continue + } + return containerV1Mutator{&spec.Containers[i]}, true + } + return nil, false +} + +func (m *podSpecV1Mutator) GetContainerByIndex(init bool, i int) (ContainerMutator, bool) { + var container *corev1.Container + spec := m.spec + if init { + if i < 0 || i >= len(spec.InitContainers) { + return nil, false + } + container = &spec.InitContainers[i] + } else { + if i < 0 || i >= len(spec.Containers) { + return nil, false + } + container = &spec.Containers[i] + } + return containerV1Mutator{container}, true +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go new file mode 100644 index 00000000000..545b1fa8de3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go @@ -0,0 +1,150 @@ +package referencemutator + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func Test_podSpecV1Mutator_Mutate(t *testing.T) { + type fields struct { + spec *corev1.PodSpec + oldSpec *corev1.PodSpec + path *field.Path + } + type args struct { + fn ImageReferenceMutateFunc + } + tests := []struct { + name string + fields fields + args args + want field.ErrorList + wantSpec *corev1.PodSpec + }{ + { + name: "no-op", + fields: fields{spec: &corev1.PodSpec{}}, + }, + { + name: "passes init container reference", + fields: fields{spec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if !reflect.DeepEqual(ref, imageRef("test")) { + t.Errorf("unexpected ref: %#v", ref) + } + return nil + }}, + wantSpec: 
&corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }, + }, + { + name: "passes container reference", + fields: fields{spec: &corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if !reflect.DeepEqual(ref, imageRef("test")) { + t.Errorf("unexpected ref: %#v", ref) + } + return nil + }}, + wantSpec: &corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }, + }, + + { + name: "mutates reference", + fields: fields{spec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name == "test-2" { + ref.Name = "test-3" + } + return nil + }}, + wantSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-3"}, + }, + }, + }, + { + name: "mutates only changed references", + fields: fields{ + spec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }, + oldSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test-1"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name != "test" { + t.Errorf("did not expect to be called for existing reference") + } + ref.Name = "test-3" + return nil + }}, + wantSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test-3"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &podSpecV1Mutator{ + spec: tt.fields.spec, + oldSpec: tt.fields.oldSpec, + path: tt.fields.path, + } + if tt.wantSpec == nil { + tt.wantSpec = &corev1.PodSpec{} + } + if got := m.Mutate(tt.args.fn); !reflect.DeepEqual(got, tt.want) { + t.Errorf("buildSpecMutator.Mutate() = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(tt.wantSpec, tt.fields.spec) { + t.Errorf("buildSpecMutator.Mutate() spec = %v, want %v", tt.fields.spec, tt.wantSpec) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go new file mode 100644 index 00000000000..996a97e58a5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go @@ -0,0 +1,710 @@ +package registryclient + +import ( + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "path" + "sort" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + + "k8s.io/klog" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + registryclient "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/opencontainers/go-digest" +) + +// RepositoryRetriever fetches a Docker distribution.Repository. 
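+//
+// Typical use against the default implementation returned by NewContext
+// (registry URL and repository name are hypothetical):
+//
+//	u, _ := url.Parse("https://registry.example.com")
+//	repo, err := retriever.Repository(ctx, u, "library/busybox", false)
+//	// on success, repo is ready for Manifests, Blobs, and Tags calls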
+type RepositoryRetriever interface { + // Repository returns a properly authenticated distribution.Repository for the given registry, repository + // name, and insecure toleration behavior. + Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) +} + +// ErrNotV2Registry is returned when the server does not report itself as a V2 Docker registry +type ErrNotV2Registry struct { + Registry string +} + +func (e *ErrNotV2Registry) Error() string { + return fmt.Sprintf("endpoint %q does not support v2 API", e.Registry) +} + +type AuthHandlersFunc func(transport http.RoundTripper, registry *url.URL, repoName string) []auth.AuthenticationHandler + +// NewContext is capable of creating RepositoryRetrievers. +func NewContext(transport, insecureTransport http.RoundTripper) *Context { + return &Context{ + Transport: transport, + InsecureTransport: insecureTransport, + Challenges: challenge.NewSimpleManager(), + Actions: []string{"pull"}, + Retries: 2, + Credentials: NoCredentials, + + pings: make(map[url.URL]error), + redirect: make(map[url.URL]*url.URL), + } +} + +type transportCache struct { + rt http.RoundTripper + scopes map[string]struct{} + transport http.RoundTripper +} + +type Context struct { + Transport http.RoundTripper + InsecureTransport http.RoundTripper + Challenges challenge.Manager + Scopes []auth.Scope + Actions []string + Retries int + Credentials auth.CredentialStore + Limiter *rate.Limiter + + DisableDigestVerification bool + + lock sync.Mutex + pings map[url.URL]error + redirect map[url.URL]*url.URL + cachedTransports []transportCache +} + +func (c *Context) Copy() *Context { + c.lock.Lock() + defer c.lock.Unlock() + copied := &Context{ + Transport: c.Transport, + InsecureTransport: c.InsecureTransport, + Challenges: c.Challenges, + Scopes: c.Scopes, + Actions: c.Actions, + Retries: c.Retries, + Credentials: c.Credentials, + Limiter: c.Limiter, + + DisableDigestVerification: c.DisableDigestVerification, + + pings: make(map[url.URL]error), + redirect: make(map[url.URL]*url.URL), + } + for k, v := range c.redirect { + copied.redirect[k] = v + } + return copied +} + +func (c *Context) WithRateLimiter(limiter *rate.Limiter) *Context { + c.Limiter = limiter + return c +} + +func (c *Context) WithScopes(scopes ...auth.Scope) *Context { + c.Scopes = scopes + return c +} + +func (c *Context) WithActions(actions ...string) *Context { + c.Actions = actions + return c +} + +func (c *Context) WithCredentials(credentials auth.CredentialStore) *Context { + c.Credentials = credentials + return c +} + +// Reset clears any cached repository info for this context. +func (c *Context) Reset() { + c.lock.Lock() + defer c.lock.Unlock() + + c.pings = nil + c.redirect = nil +} + +func (c *Context) cachedPing(src url.URL) (*url.URL, error) { + c.lock.Lock() + defer c.lock.Unlock() + + err, ok := c.pings[src] + if !ok { + return nil, nil + } + if err != nil { + return nil, err + } + if redirect, ok := c.redirect[src]; ok { + src = *redirect + } + return &src, nil +} + +// Ping contacts a registry and returns the transport and URL of the registry or an error. 
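+//
+// Outcomes, including errors and redirects, are cached per registry URL, so
+// repeated calls avoid extra network round trips (u is a hypothetical *url.URL):
+//
+//	rt, src, err := c.Ping(ctx, u, false) // probes the /v2/ endpoint
+//	rt, src, err = c.Ping(ctx, u, false)  // answered from the ping/redirect cache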
+func (c *Context) Ping(ctx context.Context, registry *url.URL, insecure bool) (http.RoundTripper, *url.URL, error) {
+	t := c.Transport
+	if insecure && c.InsecureTransport != nil {
+		t = c.InsecureTransport
+	}
+	src := *registry
+	if len(src.Scheme) == 0 {
+		src.Scheme = "https"
+	}
+
+	// reuse cached pings
+	url, err := c.cachedPing(src)
+	if err != nil {
+		return nil, nil, err
+	}
+	if url != nil {
+		return t, url, nil
+	}
+
+	// follow redirects
+	redirect, err := c.ping(src, insecure, t)
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	c.pings[src] = err
+	if err != nil {
+		return nil, nil, err
+	}
+	if redirect != nil {
+		c.redirect[src] = redirect
+		src = *redirect
+	}
+	return t, &src, nil
+}
+
+func (c *Context) Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) {
+	named, err := reference.WithName(repoName)
+	if err != nil {
+		return nil, err
+	}
+
+	rt, src, err := c.Ping(ctx, registry, insecure)
+	if err != nil {
+		return nil, err
+	}
+
+	rt = c.repositoryTransport(rt, src, repoName)
+
+	repo, err := registryclient.NewRepository(named, src.String(), rt)
+	if err != nil {
+		return nil, err
+	}
+	if !c.DisableDigestVerification {
+		repo = repositoryVerifier{Repository: repo}
+	}
+	limiter := c.Limiter
+	if limiter == nil {
+		limiter = rate.NewLimiter(rate.Limit(5), 5)
+	}
+	return NewLimitedRetryRepository(repo, c.Retries, limiter), nil
+}
+
+func (c *Context) ping(registry url.URL, insecure bool, transport http.RoundTripper) (*url.URL, error) {
+	pingClient := &http.Client{
+		Transport: transport,
+		Timeout:   15 * time.Second,
+	}
+	target := registry
+	target.Path = path.Join(target.Path, "v2") + "/"
+	req, err := http.NewRequest("GET", target.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	resp, err := pingClient.Do(req)
+	if err != nil {
+		if insecure && registry.Scheme == "https" {
+			klog.V(5).Infof("Falling back to an HTTP check for an insecure registry %s: %v", registry.String(), err)
+			registry.Scheme = "http"
+			_, nErr := c.ping(registry, true, transport)
+			if nErr != nil {
+				return nil, nErr
+			}
+			return &registry, nil
+		}
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	versions := auth.APIVersions(resp, "Docker-Distribution-API-Version")
+	if len(versions) == 0 {
+		klog.V(5).Infof("Registry responded to v2 Docker endpoint, but has no header for Docker Distribution %s: %d, %#v", req.URL, resp.StatusCode, resp.Header)
+		switch {
+		case resp.StatusCode >= 200 && resp.StatusCode < 300:
+			// v2
+		case resp.StatusCode == http.StatusUnauthorized, resp.StatusCode == http.StatusForbidden:
+			// v2
+		default:
+			return nil, &ErrNotV2Registry{Registry: registry.String()}
+		}
+	}
+
+	c.Challenges.AddResponse(resp)
+
+	return nil, nil
+}
+
+func hasAll(a, b map[string]struct{}) bool {
+	for key := range b {
+		if _, ok := a[key]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+type stringScope string
+
+func (s stringScope) String() string { return string(s) }
+
+// cachedTransport reuses an underlying transport for the given round tripper based
+// on the set of passed scopes. It will always return a transport that has at least the
+// provided scope list.
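+//
+// Matching is by scope superset: a transport cached for
+// {repository:a:pull, repository:b:pull} is reused for a request that needs
+// only {repository:a:pull}.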
+func (c *Context) cachedTransport(rt http.RoundTripper, scopes []auth.Scope) http.RoundTripper { + scopeNames := make(map[string]struct{}) + for _, scope := range scopes { + scopeNames[scope.String()] = struct{}{} + } + + c.lock.Lock() + defer c.lock.Unlock() + for _, c := range c.cachedTransports { + if c.rt == rt && hasAll(c.scopes, scopeNames) { + return c.transport + } + } + + // avoid taking a dependency on kube sets.String for minimal dependencies + names := make([]string, 0, len(scopeNames)) + for s := range scopeNames { + names = append(names, s) + } + sort.Strings(names) + scopes = make([]auth.Scope, 0, len(scopeNames)) + for _, s := range names { + scopes = append(scopes, stringScope(s)) + } + + t := transport.NewTransport( + rt, + // TODO: slightly smarter authorizer that retries unauthenticated requests + // TODO: make multiple attempts if the first credential fails + auth.NewAuthorizer( + c.Challenges, + auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{ + Transport: rt, + Credentials: c.Credentials, + Scopes: scopes, + }), + auth.NewBasicHandler(c.Credentials), + ), + ) + c.cachedTransports = append(c.cachedTransports, transportCache{ + rt: rt, + scopes: scopeNames, + transport: t, + }) + return t +} + +func (c *Context) scopes(repoName string) []auth.Scope { + scopes := make([]auth.Scope, 0, 1+len(c.Scopes)) + scopes = append(scopes, c.Scopes...) + if len(c.Actions) == 0 { + scopes = append(scopes, auth.RepositoryScope{Repository: repoName, Actions: []string{"pull"}}) + } else { + scopes = append(scopes, auth.RepositoryScope{Repository: repoName, Actions: c.Actions}) + } + return scopes +} + +func (c *Context) repositoryTransport(t http.RoundTripper, registry *url.URL, repoName string) http.RoundTripper { + return c.cachedTransport(t, c.scopes(repoName)) +} + +var nowFn = time.Now + +type retryRepository struct { + distribution.Repository + + limiter *rate.Limiter + retries int + sleepFn func(time.Duration) +} + +// NewLimitedRetryRepository wraps a distribution.Repository with helpers that will retry temporary failures +// over a limited time window and duration, and also obeys a rate limit. +func NewLimitedRetryRepository(repo distribution.Repository, retries int, limiter *rate.Limiter) distribution.Repository { + return &retryRepository{ + Repository: repo, + + limiter: limiter, + retries: retries, + sleepFn: time.Sleep, + } +} + +// isTemporaryHTTPError returns true if the error indicates a temporary or partial HTTP failure +func isTemporaryHTTPError(err error) (time.Duration, bool) { + if err == nil { + return 0, false + } + switch t := err.(type) { + case net.Error: + return time.Second, t.Temporary() || t.Timeout() + case errcode.ErrorCoder: + // note: we explicitly do not check errcode.ErrorCodeUnknown because that is used in + // a wide range of scenarios to convey "generic error", not "retryable error" + switch t.ErrorCode() { + case errcode.ErrorCodeUnavailable: + return 5 * time.Second, true + case errcode.ErrorCodeTooManyRequests: + return 2 * time.Second, true + } + case *registryclient.UnexpectedHTTPResponseError: + switch t.StatusCode { + case http.StatusInternalServerError, http.StatusGatewayTimeout, http.StatusServiceUnavailable, http.StatusBadGateway: + return 5 * time.Second, true + case http.StatusTooManyRequests: + return 2 * time.Second, true + } + } + return 0, false +} + +// shouldRetry returns true if the error was temporary and count is less than retries. 
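+//
+// shouldRetry also performs the backoff itself: when it returns true it has
+// already slept via sleepFn, so callers simply loop (doCall is hypothetical):
+//
+//	for i := 0; ; i++ {
+//		v, err := doCall()
+//		if c.shouldRetry(i, err) {
+//			continue
+//		}
+//		return v, err
+//	}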
+func (c *retryRepository) shouldRetry(count int, err error) bool { + if err == nil { + return false + } + retryAfter, ok := isTemporaryHTTPError(err) + if !ok { + return false + } + if count >= c.retries { + return false + } + c.sleepFn(retryAfter) + klog.V(4).Infof("Retrying request to Docker registry after encountering error (%d attempts remaining): %v", count, err) + return true +} + +// Manifests wraps the manifest service in a retryManifest for shared retries. +func (c *retryRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + s, err := c.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return retryManifest{ManifestService: s, repo: c}, nil +} + +// Blobs wraps the blob service in a retryBlobStore for shared retries. +func (c *retryRepository) Blobs(ctx context.Context) distribution.BlobStore { + return retryBlobStore{BlobStore: c.Repository.Blobs(ctx), repo: c} +} + +// Tags lists the tags under the named repository. +func (c *retryRepository) Tags(ctx context.Context) distribution.TagService { + return &retryTags{TagService: c.Repository.Tags(ctx), repo: c} +} + +// retryManifest wraps the manifest service and invokes retries on the repo. +type retryManifest struct { + distribution.ManifestService + repo *retryRepository +} + +// Exists returns true if the manifest exists. +func (c retryManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return false, err + } + exists, err := c.ManifestService.Exists(ctx, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return exists, err + } +} + +// Get retrieves the manifest identified by the digest, if it exists. +func (c retryManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + m, err := c.ManifestService.Get(ctx, dgst, options...) + if c.repo.shouldRetry(i, err) { + continue + } + return m, err + } +} + +// retryBlobStore wraps the blob store and invokes retries on the repo. 
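+// Stat, ServeBlob, and Open each repeat the limiter.Wait plus shouldRetry
+// loop used by retryManifest above.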
+type retryBlobStore struct { + distribution.BlobStore + repo *retryRepository +} + +func (c retryBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return distribution.Descriptor{}, err + } + d, err := c.BlobStore.Stat(ctx, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return d, err + } +} + +func (c retryBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return err + } + err := c.BlobStore.ServeBlob(ctx, w, req, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return err + } +} + +func (c retryBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + rsc, err := c.BlobStore.Open(ctx, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return rsc, err + } +} + +type retryTags struct { + distribution.TagService + repo *retryRepository +} + +func (c *retryTags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return distribution.Descriptor{}, err + } + t, err := c.TagService.Get(ctx, tag) + if c.repo.shouldRetry(i, err) { + continue + } + return t, err + } +} + +func (c *retryTags) All(ctx context.Context) ([]string, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + t, err := c.TagService.All(ctx) + if c.repo.shouldRetry(i, err) { + continue + } + return t, err + } +} + +func (c *retryTags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + t, err := c.TagService.Lookup(ctx, digest) + if c.repo.shouldRetry(i, err) { + continue + } + return t, err + } +} + +// repositoryVerifier ensures that manifests are verified when they are retrieved via digest +type repositoryVerifier struct { + distribution.Repository +} + +// Manifests returns a ManifestService that checks whether manifests match their digest. +func (r repositoryVerifier) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + ms, err := r.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return manifestServiceVerifier{ManifestService: ms}, nil +} + +// Blobs returns a BlobStore that checks whether blob content returned from the server matches the expected digest. +func (r repositoryVerifier) Blobs(ctx context.Context) distribution.BlobStore { + return blobStoreVerifier{BlobStore: r.Repository.Blobs(ctx)} +} + +// manifestServiceVerifier wraps the manifest service and ensures that content retrieved by digest matches that digest. +type manifestServiceVerifier struct { + distribution.ManifestService +} + +// Get retrieves the manifest identified by the digest and guarantees it matches the content it is retrieved by. +func (m manifestServiceVerifier) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + manifest, err := m.ManifestService.Get(ctx, dgst, options...) 
+ if err != nil { + return nil, err + } + if len(dgst) > 0 { + if err := VerifyManifestIntegrity(manifest, dgst); err != nil { + return nil, err + } + } + return manifest, nil +} + +// Put ensures the manifest is hashable to the returned digest, or returns no digest and an error. +func (m manifestServiceVerifier) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + dgst, err := m.ManifestService.Put(ctx, manifest, options...) + if err != nil { + return "", err + } + if len(dgst) > 0 { + if err := VerifyManifestIntegrity(manifest, dgst); err != nil { + return "", err + } + } + return dgst, nil +} + +// VerifyManifestIntegrity checks the provided manifest against the specified digest and returns an error +// if the manifest does not match that digest. +func VerifyManifestIntegrity(manifest distribution.Manifest, dgst digest.Digest) error { + contentDigest, err := ContentDigestForManifest(manifest, dgst.Algorithm()) + if err != nil { + return err + } + if contentDigest != dgst { + if klog.V(4) { + _, payload, _ := manifest.Payload() + klog.Infof("Mismatched content: %s\n%s", contentDigest, string(payload)) + } + return fmt.Errorf("content integrity error: the manifest retrieved with digest %s does not match the digest calculated from the content %s", dgst, contentDigest) + } + return nil +} + +// ContentDigestForManifest returns the digest in the provided algorithm of the supplied manifest's contents. +func ContentDigestForManifest(manifest distribution.Manifest, algo digest.Algorithm) (digest.Digest, error) { + switch t := manifest.(type) { + case *schema1.SignedManifest: + // schema1 manifest digests are calculated from the payload + if len(t.Canonical) == 0 { + return "", fmt.Errorf("the schema1 manifest does not have a canonical representation") + } + return algo.FromBytes(t.Canonical), nil + default: + _, payload, err := manifest.Payload() + if err != nil { + return "", err + } + return algo.FromBytes(payload), nil + } +} + +// blobStoreVerifier wraps the blobs service and ensures that content retrieved by digest matches that digest. +type blobStoreVerifier struct { + distribution.BlobStore +} + +// Get retrieves the blob identified by the digest and guarantees it matches the content it is retrieved by. +func (b blobStoreVerifier) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + data, err := b.BlobStore.Get(ctx, dgst) + if err != nil { + return nil, err + } + if len(dgst) > 0 { + dataDgst := dgst.Algorithm().FromBytes(data) + if dataDgst != dgst { + return nil, fmt.Errorf("content integrity error: the blob retrieved with digest %s does not match the digest calculated from the content %s", dgst, dataDgst) + } + } + return data, nil +} + +// Open streams the blob identified by the digest and guarantees it matches the content it is retrieved by. +func (b blobStoreVerifier) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rsc, err := b.BlobStore.Open(ctx, dgst) + if err != nil { + return nil, err + } + if len(dgst) > 0 { + return &readSeekCloserVerifier{ + rsc: rsc, + hash: dgst.Algorithm().Hash(), + expect: dgst, + }, nil + } + return rsc, nil +} + +// readSeekCloserVerifier performs validation over the stream returned by a distribution.ReadSeekCloser returned +// by blobService.Open. 
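+//
+// The check is incremental and only fires at io.EOF, so callers must drain
+// the stream to get verification (rsc is a hypothetical opened blob):
+//
+//	if _, err := io.Copy(ioutil.Discard, rsc); err != nil {
+//		// a digest mismatch surfaces here as a content integrity error
+//	}
+//
+// Seeking disables the running hash, since bytes would be skipped or reread.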
+type readSeekCloserVerifier struct { + rsc distribution.ReadSeekCloser + hash hash.Hash + expect digest.Digest +} + +// Read verifies the bytes in the underlying stream match the expected digest or returns an error. +func (r *readSeekCloserVerifier) Read(p []byte) (n int, err error) { + n, err = r.rsc.Read(p) + if r.hash != nil { + if n > 0 { + r.hash.Write(p[:n]) + } + if err == io.EOF { + actual := digest.NewDigest(r.expect.Algorithm(), r.hash) + if actual != r.expect { + return n, fmt.Errorf("content integrity error: the blob streamed from digest %s does not match the digest calculated from the content %s", r.expect, actual) + } + } + } + return n, err +} + +// Seek moves the underlying stream and also cancels any streaming hash. Verification is not possible +// with a seek. +func (r *readSeekCloserVerifier) Seek(offset int64, whence int) (int64, error) { + r.hash = nil + return r.rsc.Seek(offset, whence) +} + +// Close closes the underlying stream. +func (r *readSeekCloserVerifier) Close() error { + return r.rsc.Close() +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go new file mode 100644 index 00000000000..a6a9e2e41dd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go @@ -0,0 +1,843 @@ +package registryclient + +import ( + "bytes" + "encoding/hex" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/time/rate" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + registryclient "github.com/docker/distribution/registry/client" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +type mockRetriever struct { + repo distribution.Repository + insecure bool + err error +} + +func (r *mockRetriever) Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) { + r.insecure = insecure + return r.repo, r.err +} + +type mockRepository struct { + repoErr, getErr, getByTagErr, getTagErr, tagErr, untagErr, allTagErr, err error + + blobs *mockBlobStore + + manifest distribution.Manifest + tags map[string]string +} + +func (r *mockRepository) Name() string { return "test" } +func (r *mockRepository) Named() reference.Named { + named, _ := reference.WithName("test") + return named +} + +func (r *mockRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return r, r.repoErr +} +func (r *mockRepository) Blobs(ctx context.Context) distribution.BlobStore { return r.blobs } +func (r *mockRepository) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + return false, r.getErr +} +func (r *mockRepository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + for _, option := range options { + if _, ok := option.(distribution.WithTagOption); ok { + return r.manifest, r.getByTagErr + } + } + return r.manifest, r.getErr +} +func (r *mockRepository) Delete(ctx context.Context, dgst digest.Digest) error { + return fmt.Errorf("not implemented") +} +func (r *mockRepository) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + return "", fmt.Errorf("not implemented") 
+} +func (r *mockRepository) Tags(ctx context.Context) distribution.TagService { + return &mockTagService{repo: r} +} + +type mockBlobStore struct { + distribution.BlobStore + + blobs map[digest.Digest][]byte + + statErr, serveErr, openErr error +} + +func (r *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, r.statErr +} + +func (r *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { + return r.serveErr +} + +func (r *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, r.openErr +} + +func (r *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + b, exists := r.blobs[dgst] + if !exists { + return nil, distribution.ErrBlobUnknown + } + return b, nil +} + +type mockTagService struct { + distribution.TagService + + repo *mockRepository +} + +func (r *mockTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + v, ok := r.repo.tags[tag] + if !ok { + return distribution.Descriptor{}, r.repo.getTagErr + } + dgst, err := digest.Parse(v) + if err != nil { + panic(err) + } + return distribution.Descriptor{Digest: dgst}, r.repo.getTagErr +} + +func (r *mockTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + r.repo.tags[tag] = desc.Digest.String() + return r.repo.tagErr +} + +func (r *mockTagService) Untag(ctx context.Context, tag string) error { + if _, ok := r.repo.tags[tag]; ok { + delete(r.repo.tags, tag) + } + return r.repo.untagErr +} + +func (r *mockTagService) All(ctx context.Context) (res []string, err error) { + err = r.repo.allTagErr + for tag := range r.repo.tags { + res = append(res, tag) + } + return +} + +func (r *mockTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func TestPing(t *testing.T) { + retriever := NewContext(http.DefaultTransport, http.DefaultTransport).WithCredentials(NoCredentials) + + fn404 := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) } + var fn http.HandlerFunc + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if fn != nil { + fn(w, r) + } + }) + server := httptest.NewServer(mux) + defer server.Close() + + uri, _ := url.Parse(server.URL) + + testCases := []struct { + name string + uri url.URL + expectV2 bool + fn http.HandlerFunc + }{ + {name: "http only", uri: url.URL{Scheme: "http", Host: uri.Host}, expectV2: false, fn: fn404}, + {name: "https only", uri: url.URL{Scheme: "https", Host: uri.Host}, expectV2: false, fn: fn404}, + { + name: "403", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(403) + return + } + }, + }, + { + name: "401", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(401) + return + } + }, + }, + { + name: "200", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(200) + return + } + }, + }, + { + name: "has header but 500", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if 
r.URL.Path == "/v2/" { + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.WriteHeader(500) + return + } + }, + }, + { + name: "no header, 500", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: false, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(500) + return + } + }, + }, + } + + for _, test := range testCases { + fn = test.fn + _, err := retriever.ping(test.uri, true, retriever.InsecureTransport) + if (err != nil && strings.Contains(err.Error(), "does not support v2 API")) == test.expectV2 { + t.Errorf("%s: Expected ErrNotV2Registry, got %v", test.name, err) + } + } +} + +var unlimited = rate.NewLimiter(rate.Inf, 100) + +type temporaryError struct{} + +func (temporaryError) Error() string { return "temporary" } +func (temporaryError) Timeout() bool { return false } +func (temporaryError) Temporary() bool { return true } + +func TestShouldRetry(t *testing.T) { + r := NewLimitedRetryRepository(nil, 1, unlimited).(*retryRepository) + sleeps := 0 + r.sleepFn = func(time.Duration) { sleeps++ } + + // nil error doesn't consume retries + if r.shouldRetry(0, nil) { + t.Fatal(r) + } + + // normal error doesn't consume retries + if r.shouldRetry(0, fmt.Errorf("error")) { + t.Fatal(r) + } + + // docker error doesn't consume retries + if r.shouldRetry(0, errcode.ErrorCodeDenied) { + t.Fatal(r) + } + if sleeps != 0 { + t.Fatal(sleeps) + } + + now := time.Unix(1, 0) + nowFn = func() time.Time { + return now + } + // should retry a temporary error + r = NewLimitedRetryRepository(nil, 1, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = func(time.Duration) { sleeps++ } + if !r.shouldRetry(0, temporaryError{}) { + t.Fatal(r) + } + if r.shouldRetry(1, temporaryError{}) { + t.Fatal(r) + } + if sleeps != 1 { + t.Fatal(sleeps) + } +} + +func TestRetryFailure(t *testing.T) { + sleeps := 0 + sleepFn := func(time.Duration) { sleeps++ } + + ctx := context.Background() + // do not retry on Manifests() + repo := &mockRepository{repoErr: fmt.Errorf("does not support v2 API")} + r := NewLimitedRetryRepository(repo, 1, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err := r.Manifests(ctx); m != nil || err != repo.repoErr || r.retries != 1 { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + + // do not retry on Manifests() + repo = &mockRepository{repoErr: temporaryError{}} + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err := r.Manifests(ctx); m != nil || err != repo.repoErr || r.retries != 4 { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + + // do not retry on non standard errors + repo = &mockRepository{getErr: fmt.Errorf("does not support v2 API")} + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + m, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr || r.retries != 4 { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + + // verify docker known errors + repo = &mockRepository{ + getErr: temporaryError{}, + blobs: &mockBlobStore{ + serveErr: errcode.ErrorCodeTooManyRequests.WithDetail(struct{}{}), + statErr: errcode.ErrorCodeUnavailable.WithDetail(struct{}{}), + // not retriable + openErr: errcode.ErrorCodeUnknown.WithDetail(struct{}{}), + }, + } + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err = r.Manifests(ctx); err != 
nil {
+		t.Fatal(err)
+	}
+	r.retries = 1
+	if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 2
+	if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr {
+		t.Fatalf("unexpected: %v %v %#v", m, err, r)
+	}
+	if sleeps != 3 {
+		t.Fatal(sleeps)
+	}
+
+	sleeps = 0
+	r.retries = 1
+	b := r.Blobs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := b.Stat(ctx, digest.Digest("x")); err != repo.blobs.statErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 2
+	if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 4
+	if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	// Open did not retry
+	if sleeps != 3 {
+		t.Fatal(sleeps)
+	}
+
+	// verify unknown client errors
+	repo = &mockRepository{
+		getErr: temporaryError{},
+		blobs: &mockBlobStore{
+			serveErr: &registryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusTooManyRequests},
+			statErr:  &registryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusServiceUnavailable},
+			openErr:  &registryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusInternalServerError},
+		},
+	}
+	r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository)
+	sleeps = 0
+	r.sleepFn = sleepFn
+	if m, err = r.Manifests(ctx); err != nil {
+		t.Fatal(err)
+	}
+	r.retries = 1
+	if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 2
+	if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr {
+		t.Fatalf("unexpected: %v %v %#v", m, err, r)
+	}
+	if sleeps != 3 {
+		t.Fatal(sleeps)
+	}
+
+	sleeps = 0
+	r.retries = 1
+	b = r.Blobs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := b.Stat(ctx, digest.Digest("x")); err != repo.blobs.statErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 2
+	if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 4
+	if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	// Open did not retry
+	if sleeps != 7 {
+		t.Fatal(sleeps)
+	}
+
+	// verify more unknown client errors
+	repo = &mockRepository{
+		getErr: temporaryError{},
+		blobs: &mockBlobStore{
+			serveErr: &registryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusBadGateway},
+			statErr:  &registryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusGatewayTimeout},
+			openErr:  &registryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusInternalServerError},
+		},
+	}
+	r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository)
+	sleeps = 0
+	r.sleepFn = sleepFn
+	if m, err = r.Manifests(ctx); err != nil {
+		t.Fatal(err)
+	}
+	r.retries = 1
+	if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 2
+	if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr {
+		t.Fatalf("unexpected: %v %v %#v", m, err, r)
+	}
+	if sleeps != 3 {
+		t.Fatal(sleeps)
+	}
+
+	sleeps = 0
+	r.retries = 1
+	b = r.Blobs(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if _, err := b.Stat(ctx, digest.Digest("x")); err != repo.blobs.statErr {
+		t.Fatalf("unexpected: %v %#v", err, r)
+	}
+	r.retries = 2
+	if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr {
t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 4 + if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + // Open did not retry + if sleeps != 7 { + t.Fatal(sleeps) + } + + // retry with temporary errors + repo = &mockRepository{ + getErr: temporaryError{}, + blobs: &mockBlobStore{ + serveErr: temporaryError{}, + statErr: temporaryError{}, + openErr: temporaryError{}, + }, + } + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err = r.Manifests(ctx); err != nil { + t.Fatal(err) + } + r.retries = 1 + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + if sleeps != 3 { + t.Fatal(sleeps) + } +} + +func Test_verifyManifest_Get(t *testing.T) { + tests := []struct { + name string + dgst digest.Digest + err error + manifest distribution.Manifest + options []distribution.ManifestServiceOption + want distribution.Manifest + wantErr bool + }{ + { + dgst: payload1Digest, + manifest: &fakeManifest{payload: []byte(payload1)}, + want: &fakeManifest{payload: []byte(payload1)}, + }, + { + dgst: payload2Digest, + manifest: &fakeManifest{payload: []byte(payload2)}, + want: &fakeManifest{payload: []byte(payload2)}, + }, + { + dgst: payload1Digest, + manifest: &fakeManifest{payload: []byte(payload2)}, + wantErr: true, + }, + { + dgst: payload1Digest, + manifest: &fakeManifest{payload: []byte(payload1), err: fmt.Errorf("unknown")}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms := &fakeManifestService{err: tt.err, manifest: tt.manifest} + m := manifestServiceVerifier{ + ManifestService: ms, + } + ctx := context.Background() + got, err := m.Get(ctx, tt.dgst, tt.options...) 
+
+func Test_verifyManifest_Get(t *testing.T) {
+	tests := []struct {
+		name     string
+		dgst     digest.Digest
+		err      error
+		manifest distribution.Manifest
+		options  []distribution.ManifestServiceOption
+		want     distribution.Manifest
+		wantErr  bool
+	}{
+		{
+			dgst:     payload1Digest,
+			manifest: &fakeManifest{payload: []byte(payload1)},
+			want:     &fakeManifest{payload: []byte(payload1)},
+		},
+		{
+			dgst:     payload2Digest,
+			manifest: &fakeManifest{payload: []byte(payload2)},
+			want:     &fakeManifest{payload: []byte(payload2)},
+		},
+		{
+			dgst:     payload1Digest,
+			manifest: &fakeManifest{payload: []byte(payload2)},
+			wantErr:  true,
+		},
+		{
+			dgst:     payload1Digest,
+			manifest: &fakeManifest{payload: []byte(payload1), err: fmt.Errorf("unknown")},
+			wantErr:  true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ms := &fakeManifestService{err: tt.err, manifest: tt.manifest}
+			m := manifestServiceVerifier{
+				ManifestService: ms,
+			}
+			ctx := context.Background()
+			got, err := m.Get(ctx, tt.dgst, tt.options...)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("verifyManifest.Get() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("verifyManifest.Get() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_verifyManifest_Put(t *testing.T) {
+	tests := []struct {
+		name     string
+		dgst     digest.Digest
+		err      error
+		manifest distribution.Manifest
+		options  []distribution.ManifestServiceOption
+		want     digest.Digest
+		wantErr  string
+	}{
+		{
+			dgst:     payload1Digest,
+			manifest: &fakeManifest{payload: []byte(payload1)},
+			want:     payload1Digest,
+		},
+		{
+			dgst:     payload2Digest,
+			manifest: &fakeManifest{payload: []byte(payload2)},
+			want:     payload2Digest,
+		},
+		{
+			dgst:     payload1Digest,
+			manifest: &fakeManifest{payload: []byte(payload2)},
+			wantErr:  "the manifest retrieved with digest sha256:59685d14054198fee6005106a66462a924cabe21f4b0c7c1fdf4da95ccee52bd does not match the digest calculated from the content sha256:b79e87ded1ea5293efe92bdb3caa9b7212cfa7c98aafb7c1c568d11d43519968",
+		},
+		{
+			err:      fmt.Errorf("put error"),
+			manifest: &fakeManifest{payload: []byte(payload2)},
+			wantErr:  "put error",
+		},
+		{
+			manifest: &fakeManifest{payload: []byte(payload2)},
+		},
+		{
+			manifest: &fakeManifest{payload: []byte(payload1), err: fmt.Errorf("unknown")},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ms := &fakeManifestService{err: tt.err, manifest: tt.manifest, digest: tt.dgst}
+			m := manifestServiceVerifier{
+				ManifestService: ms,
+			}
+			ctx := context.Background()
+			got, err := m.Put(ctx, tt.manifest, tt.options...)
+			if len(tt.wantErr) > 0 && err != nil && !strings.Contains(err.Error(), tt.wantErr) {
+				t.Fatalf("verifyManifest.Put() error = %v, wantErr %v", err, tt.wantErr)
+			}
+			if (err != nil) != (len(tt.wantErr) > 0) {
+				t.Fatalf("verifyManifest.Put() error = %v, wantErr %v", err, tt.wantErr)
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("verifyManifest.Put() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+const (
+	payload1 = `{"some":"content"}`
+	payload2 = `{"some":"content"} `
+)
+
+var (
+	payload1Digest = digest.SHA256.FromString(payload1)
+	payload2Digest = digest.SHA256.FromString(payload2)
+)
+
+type fakeManifest struct {
+	mediaType string
+	payload   []byte
+	err       error
+}
+
+func (m *fakeManifest) References() []distribution.Descriptor {
+	panic("not implemented")
+}
+
+func (m *fakeManifest) Payload() (mediaType string, payload []byte, err error) {
+	return m.mediaType, m.payload, m.err
+}
+
+type fakeManifestService struct {
+	digest   digest.Digest
+	manifest distribution.Manifest
+	err      error
+}
+
+func (s *fakeManifestService) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	panic("not implemented")
+}
+
+func (s *fakeManifestService) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
+	return s.manifest, s.err
+}
+
+func (s *fakeManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) {
+	return s.digest, s.err
+}
+
+func (s *fakeManifestService) Delete(ctx context.Context, dgst digest.Digest) error {
+	panic("not implemented")
+}
+
+func Test_blobStoreVerifier_Get(t *testing.T) {
+	tests := []struct {
+		name    string
+		bytes   []byte
+		err     error
+		dgst    digest.Digest
+		want    []byte
+		wantErr bool
+	}{
+		{
+			dgst:  payload1Digest,
+			bytes: []byte(payload1),
+			want:  []byte(payload1),
+		},
+		{
+			dgst:  payload2Digest,
+			bytes: []byte(payload2),
+			want:  []byte(payload2),
+		},
+		{
+			dgst:    payload1Digest,
+			bytes:   []byte(payload2),
+			wantErr: true,
+		},
+		{
+			dgst:    payload1Digest,
+			bytes:   []byte(payload1),
+			err:     fmt.Errorf("unknown"),
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bs := &fakeBlobStore{err: tt.err, bytes: tt.bytes}
+			b := blobStoreVerifier{
+				BlobStore: bs,
+			}
+			ctx := context.Background()
+			got, err := b.Get(ctx, tt.dgst)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("blobStoreVerifier.Get() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("blobStoreVerifier.Get() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_blobStoreVerifier_Open(t *testing.T) {
+	tests := []struct {
+		name    string
+		bytes   []byte
+		err     error
+		dgst    digest.Digest
+		want    func(t *testing.T, got distribution.ReadSeekCloser)
+		wantErr bool
+	}{
+		{
+			dgst:  payload1Digest,
+			bytes: []byte(payload1),
+			want: func(t *testing.T, got distribution.ReadSeekCloser) {
+				data, err := ioutil.ReadAll(got)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !bytes.Equal([]byte(payload1), data) {
+					t.Fatalf("contents not equal: %s", hex.Dump(data))
+				}
+			},
+		},
+		{
+			dgst:  payload2Digest,
+			bytes: []byte(payload2),
+			want: func(t *testing.T, got distribution.ReadSeekCloser) {
+				data, err := ioutil.ReadAll(got)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !bytes.Equal([]byte(payload2), data) {
+					t.Fatalf("contents not equal: %s", hex.Dump(data))
+				}
+			},
+		},
+		{
+			dgst:  payload1Digest,
+			bytes: []byte(payload2),
+			want: func(t *testing.T, got distribution.ReadSeekCloser) {
+				data, err := ioutil.ReadAll(got)
+				if err == nil || !strings.Contains(err.Error(), "content integrity error") || !strings.Contains(err.Error(), payload2Digest.String()) {
+					t.Fatal(err)
+				}
+				if !bytes.Equal([]byte(payload2), data) {
+					t.Fatalf("contents not equal: %s", hex.Dump(data))
+				}
+			},
+		},
+		{
+			dgst:  payload1Digest,
+			bytes: []byte(payload2),
+			want: func(t *testing.T, got distribution.ReadSeekCloser) {
+				_, err := got.Seek(0, 0)
+				if err == nil || err.Error() != "invoked seek" {
+					t.Fatal(err)
+				}
+				data, err := ioutil.ReadAll(got)
+				if err != nil {
+					t.Fatal(err)
+				}
+				if !bytes.Equal([]byte(payload2), data) {
+					t.Fatalf("contents not equal: %s", hex.Dump(data))
+				}
+			},
+		},
+		{
+			dgst:    payload1Digest,
+			bytes:   []byte(payload1),
+			err:     fmt.Errorf("unknown"),
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			bs := &fakeBlobStore{err: tt.err, bytes: tt.bytes}
+			b := blobStoreVerifier{
+				BlobStore: bs,
+			}
+			ctx := context.Background()
+			got, err := b.Open(ctx, tt.dgst)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("blobStoreVerifier.Open() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if err != nil {
+				return
+			}
+			tt.want(t, got)
+		})
+	}
+}
+
+type fakeSeekCloser struct {
+	*bytes.Buffer
+}
+
+func (f fakeSeekCloser) Seek(offset int64, whence int) (int64, error) {
+	return 0, fmt.Errorf("invoked seek")
+}
+
+func (f fakeSeekCloser) Close() error {
+	return fmt.Errorf("not implemented")
+}
+
+type fakeBlobStore struct {
+	bytes []byte
+	err   error
+}
+
+func (s *fakeBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
+	panic("not implemented")
+}
+
+func (s *fakeBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
+	return s.bytes, s.err
+}
+
+func (s *fakeBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
+	return fakeSeekCloser{bytes.NewBuffer(s.bytes)}, s.err
+}
+
+func (s *fakeBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
+	panic("not implemented")
+}
+
+func (s *fakeBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) {
+	panic("not implemented")
+}
+
+func (s *fakeBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) {
+	panic("not implemented")
+}
+
+func (s *fakeBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
+	panic("not implemented")
+}
+
+func (s *fakeBlobStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	panic("not implemented")
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go
new file mode 100644
index 00000000000..c9d22c76073
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go
@@ -0,0 +1,90 @@
+package registryclient
+
+import (
+	"net/url"
+	"sync"
+
+	"github.com/docker/distribution/registry/client/auth"
+)
+
+var (
+	NoCredentials auth.CredentialStore = &noopCredentialStore{}
+)
+
+type RefreshTokenStore interface {
+	RefreshToken(url *url.URL, service string) string
+	SetRefreshToken(url *url.URL, service string, token string)
+}
+
+func NewRefreshTokenStore() RefreshTokenStore {
+	return &refreshTokenStore{}
+}
+
+type refreshTokenKey struct {
+	url     string
+	service string
+}
+
+type refreshTokenStore struct {
+	lock  sync.Mutex
+	store map[refreshTokenKey]string
+}
+
+func (s *refreshTokenStore) RefreshToken(url *url.URL, service string) string {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	return s.store[refreshTokenKey{url: url.String(), service: service}]
+}
+
+func (s *refreshTokenStore) SetRefreshToken(url *url.URL, service string, token string) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+	if s.store == nil {
+		s.store = make(map[refreshTokenKey]string)
+	}
+	s.store[refreshTokenKey{url: url.String(), service: service}] = token
+}
+
+type noopCredentialStore struct{}
+
+func (s *noopCredentialStore) Basic(url *url.URL) (string, string) {
+	return "", ""
+}
+
+func (s *noopCredentialStore) RefreshToken(url *url.URL, service string) string {
+	return ""
+}
+
+func (s *noopCredentialStore) SetRefreshToken(url *url.URL, service string, token string) {
+}
+
+func NewBasicCredentials() *BasicCredentials {
+	return &BasicCredentials{refreshTokenStore: &refreshTokenStore{}}
+}
+
+type basicForURL struct {
+	url                url.URL
+	username, password string
+}
+
+type BasicCredentials struct {
+	creds []basicForURL
+	*refreshTokenStore
+}
+
+func (c *BasicCredentials) Add(url *url.URL, username, password string) {
+	c.creds = append(c.creds, basicForURL{*url, username, password})
+}
+
+func (c *BasicCredentials) Basic(url *url.URL) (string, string) {
+	for _, cred := range c.creds {
+		if len(cred.url.Host) != 0 && cred.url.Host != url.Host {
+			continue
+		}
+		if len(cred.url.Path) != 0 && cred.url.Path != url.Path {
+			continue
+		}
+		return cred.username, cred.password
+	}
+	return "", ""
+}
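BasicCredentials does first-match-wins lookup, and an entry with an empty Host or Path acts as a wildcard for that component (the test that follows exercises the same behavior). A short usage sketch; the host and secrets are placeholders:

    package example

    import (
    	"net/url"

    	"github.com/openshift/library-go/pkg/image/registryclient"
    )

    func newStore() *registryclient.BasicCredentials {
    	creds := registryclient.NewBasicCredentials()
    	// An empty Path matches any path on this host; an entry with an empty
    	// Host would match every registry, so add most-specific entries first.
    	creds.Add(&url.URL{Host: "registry.example.com"}, "builder", "s3cret")
    	user, pass := creds.Basic(&url.URL{Scheme: "https", Host: "registry.example.com", Path: "/v2/"})
    	_, _ = user, pass // "builder", "s3cret"
    	return creds
    }

Because BasicCredentials embeds *refreshTokenStore, the same value should also satisfy the RefreshToken/SetRefreshToken half of auth.CredentialStore.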
diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go
new file mode 100644
index 00000000000..b33b0d7fcd4
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go
@@ -0,0 +1,20 @@
+package registryclient
+
+import (
+	"net/url"
+	"testing"
+)
+
+func TestBasicCredentials(t *testing.T) {
+	creds := NewBasicCredentials()
+	creds.Add(&url.URL{Host: "localhost"}, "test", "other")
+	if u, p := creds.Basic(&url.URL{Host: "test"}); u != "" || p != "" {
+		t.Fatalf("unexpected response: %s %s", u, p)
+	}
+	if u, p := creds.Basic(&url.URL{Host: "localhost"}); u != "test" || p != "other" {
+		t.Fatalf("unexpected response: %s %s", u, p)
+	}
+	if u, p := creds.Basic(&url.URL{Host: "localhost", Path: "/foo"}); u != "test" || p != "other" {
+		t.Fatalf("unexpected response: %s %s", u, p)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go
new file mode 100644
index 00000000000..4ca0617a1a5
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go
@@ -0,0 +1,215 @@
+package trigger
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/openshift/library-go/pkg/image/referencemutator"
+)
+
+func CalculateAnnotationTriggers(m metav1.Object, prefix string) (string, string, []ObjectFieldTrigger, error) {
+	var key, namespace string
+	if namespace = m.GetNamespace(); len(namespace) > 0 {
+		key = prefix + namespace + "/" + m.GetName()
+	} else {
+		key = prefix + m.GetName()
+	}
+	t, ok := m.GetAnnotations()[TriggerAnnotationKey]
+	if !ok {
+		return key, namespace, nil, nil
+	}
+	triggers := []ObjectFieldTrigger{}
+	if err := json.Unmarshal([]byte(t), &triggers); err != nil {
+		return key, namespace, nil, err
+	}
+	if hasDuplicateTriggers(triggers) {
+		return key, namespace, nil, fmt.Errorf("duplicate triggers are not allowed")
+	}
+	return key, namespace, triggers, nil
+}
+
+func hasDuplicateTriggers(triggers []ObjectFieldTrigger) bool {
+	for i := range triggers {
+		for j := i + 1; j < len(triggers); j++ {
+			if triggers[i].FieldPath == triggers[j].FieldPath {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func parseContainerReference(path string) (init bool, selector string, remainder string, ok bool) {
+	switch {
+	case strings.HasPrefix(path, "containers["):
+		remainder = strings.TrimPrefix(path, "containers[")
+	case strings.HasPrefix(path, "initContainers["):
+		init = true
+		remainder = strings.TrimPrefix(path, "initContainers[")
+	default:
+		return false, "", "", false
+	}
+	end := strings.Index(remainder, "]")
+	if end == -1 {
+		return false, "", "", false
+	}
+	selector = remainder[:end]
+	remainder = remainder[end+1:]
+	if len(remainder) > 0 && remainder[0] == '.' {
+		remainder = remainder[1:]
+	}
+	return init, selector, remainder, true
+}
+
+func findContainerBySelector(spec referencemutator.PodSpecReferenceMutator, init bool, selector string) (referencemutator.ContainerMutator, bool) {
+	if i, err := strconv.Atoi(selector); err == nil {
+		return spec.GetContainerByIndex(init, i)
+	}
+	// TODO: potentially make this more flexible, like whitespace
+	if name := strings.TrimSuffix(strings.TrimPrefix(selector, "?(@.name==\""), "\")"); name != selector {
+		return spec.GetContainerByName(name)
+	}
+	return nil, false
+}
+
+// ContainerForObjectFieldPath returns a reference to the container in the object with pod spec
+// underneath fieldPath. Returns error if no such container exists or the field path is invalid.
+// Returns the remaining field path beyond the container, if any.
+func ContainerForObjectFieldPath(obj runtime.Object, fieldPath string) (referencemutator.ContainerMutator, string, error) {
+	spec, err := referencemutator.GetPodSpecReferenceMutator(obj)
+	if err != nil {
+		return nil, fieldPath, err
+	}
+	specPath := spec.Path().String()
+	containerPath := strings.TrimPrefix(fieldPath, specPath)
+	if containerPath == fieldPath {
+		return nil, fieldPath, fmt.Errorf("field path does not start with the pod spec path %q: %s", specPath, fieldPath)
+	}
+	containerPath = strings.TrimPrefix(containerPath, ".")
+	init, selector, remainder, ok := parseContainerReference(containerPath)
+	if !ok {
+		return nil, fieldPath, fmt.Errorf("field path does not contain a valid container reference: %s", fieldPath)
+	}
+	container, ok := findContainerBySelector(spec, init, selector)
+	if !ok {
+		return nil, fieldPath, fmt.Errorf("no such container: %s", selector)
+	}
+	return container, remainder, nil
+}
+
+// UpdateObjectFromImages resolves each trigger on the object against tagRetriever and sets the
+// resulting image references on the targeted containers. If changes are necessary, it lazily copies
+// obj and returns it, or if no changes are necessary returns nil.
+func UpdateObjectFromImages(obj runtime.Object, tagRetriever TagRetriever) (runtime.Object, error) {
+	var updated runtime.Object
+	m, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	spec, err := referencemutator.GetPodSpecReferenceMutator(obj)
+	if err != nil {
+		return nil, err
+	}
+	path := spec.Path()
+	basePath := path.String() + "."
+	_, _, triggers, err := CalculateAnnotationTriggers(m, "/")
+	if err != nil {
+		return nil, err
+	}
+	klog.V(5).Infof("%T/%s has triggers: %#v", obj, m.GetName(), triggers)
+	for _, trigger := range triggers {
+		if trigger.Paused {
+			continue
+		}
+		fieldPath := trigger.FieldPath
+		if !strings.HasPrefix(trigger.FieldPath, basePath) {
+			klog.V(5).Infof("%T/%s trigger %s did not match base path %s", obj, m.GetName(), trigger.FieldPath, basePath)
+			continue
+		}
+		fieldPath = strings.TrimPrefix(fieldPath, basePath)
+
+		namespace := trigger.From.Namespace
+		if len(namespace) == 0 {
+			namespace = m.GetNamespace()
+		}
+		ref, _, ok := tagRetriever.ImageStreamTag(namespace, trigger.From.Name)
+		if !ok {
+			klog.V(5).Infof("%T/%s detected no pending image on %s from %#v", obj, m.GetName(), trigger.FieldPath, trigger.From)
+			continue
+		}
+
+		init, selector, remainder, ok := parseContainerReference(fieldPath)
+		if !ok || remainder != "image" {
+			return nil, fmt.Errorf("field path is not valid: %s", trigger.FieldPath)
+		}
+
+		container, ok := findContainerBySelector(spec, init, selector)
+		if !ok {
+			return nil, fmt.Errorf("no such container: %s", trigger.FieldPath)
+		}
+
+		if container.GetImage() != ref {
+			if updated == nil {
+				updated = obj.DeepCopyObject()
+				spec, _ = referencemutator.GetPodSpecReferenceMutator(updated)
+				container, _ = findContainerBySelector(spec, init, selector)
+			}
+			klog.V(5).Infof("%T/%s detected change on %s = %s", obj, m.GetName(), trigger.FieldPath, ref)
+			container.SetImage(ref)
+		}
+	}
+	return updated, nil
+}
+
+// ContainerImageChanged returns true if any container image referenced by newTriggers changed.
+func ContainerImageChanged(oldObj, newObj runtime.Object, newTriggers []ObjectFieldTrigger) bool {
+	for _, trigger := range newTriggers {
+		if trigger.Paused {
+			continue
+		}
+
+		newContainer, _, err := ContainerForObjectFieldPath(newObj, trigger.FieldPath)
+		if err != nil {
+			klog.V(5).Infof("%v", err)
+			continue
+		}
+
+		oldContainer, _, err := ContainerForObjectFieldPath(oldObj, trigger.FieldPath)
+		if err != nil {
+			// might just be a result of the update
+			continue
+		}
+
+		if newContainer.GetImage() != oldContainer.GetImage() {
+			return true
+		}
+	}
+
+	return false
+}
+
+type AnnotationUpdater interface {
+	Update(obj runtime.Object) error
+}
+
+type AnnotationReactor struct {
+	Updater AnnotationUpdater
+}
+
+func (r *AnnotationReactor) ImageChanged(obj runtime.Object, tagRetriever TagRetriever) error {
+	changed, err := UpdateObjectFromImages(obj, tagRetriever)
+	if err != nil {
+		return err
+	}
+	if changed != nil {
+		return r.Updater.Update(changed)
+	}
+	return nil
+}
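For reference while reading the tests below: the trigger annotation value is a JSON array of ObjectFieldTrigger (the type is defined in types.go later in this patch). A hand-written example of the shape CalculateAnnotationTriggers parses, with illustrative stream and container names:

    // The value stored under the image.openshift.io/triggers annotation: one
    // trigger that rewrites the "app" container's image whenever the
    // ImageStreamTag "app:latest" resolves to a new reference.
    const exampleTriggers = `[
    	{
    		"from": {"kind": "ImageStreamTag", "name": "app:latest"},
    		"fieldPath": "spec.template.spec.containers[?(@.name==\"app\")].image"
    	}
    ]`

Note UpdateObjectFromImages only accepts the selectors findContainerBySelector understands, a bare index or the exact ?(@.name=="...") form, and the path must end in .image.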
diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go
new file mode 100644
index 00000000000..afb3fff5e69
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go
@@ -0,0 +1,312 @@
+package trigger
+
+import (
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"testing"
+
+	kapps "k8s.io/api/apps/v1beta1"
+	kapiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/diff"
+	"k8s.io/client-go/util/jsonpath"
+)
+
+type fakeTagResponse struct {
+	Namespace string
+	Name      string
+	Ref       string
+	RV        int64
+}
+
+type fakeTagRetriever []fakeTagResponse
+
+func (r fakeTagRetriever) ImageStreamTag(namespace, name string) (string, int64, bool) {
+	for _, resp := range r {
+		if resp.Namespace != namespace || resp.Name != name {
+			continue
+		}
+		return resp.Ref, resp.RV, true
+	}
+	return "", 0, false
+}
+
+type fakeUpdater struct {
+	Object runtime.Object
+	Err    error
+}
+
+func (u *fakeUpdater) Update(obj runtime.Object) error {
+	u.Object = obj
+	return u.Err
+}
+
+func testStatefulSet(params []ObjectFieldTrigger, containers map[string]string) *kapps.StatefulSet {
+	obj := &kapps.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
+		Spec: kapps.StatefulSetSpec{
+			Template: kapiv1.PodTemplateSpec{},
+		},
+	}
+	data, _ := json.Marshal(params)
+	obj.Annotations = map[string]string{TriggerAnnotationKey: string(data)}
+	var names, initNames []string
+	for k := range containers {
+		if strings.HasPrefix(k, "-") {
+			initNames = append(initNames, k[1:])
+		} else {
+			names = append(names, k)
+		}
+	}
+	sort.Sort(sort.StringSlice(initNames))
+	sort.Sort(sort.StringSlice(names))
+	for _, name := range initNames {
+		obj.Spec.Template.Spec.InitContainers = append(obj.Spec.Template.Spec.InitContainers, kapiv1.Container{Name: name, Image: containers["-"+name]})
+	}
+	for _, name := range names {
+		obj.Spec.Template.Spec.Containers = append(obj.Spec.Template.Spec.Containers, kapiv1.Container{Name: name, Image: containers[name]})
+	}
+	return obj
+}
+
+func TestAnnotationJSONPath(t *testing.T) {
+	_, err := jsonpath.Parse("field_path", "spec.template.spec.containers[?(@.name==\"test\")].image")
+	if err != nil {
+		t.Error(err)
+	}
+}
+
+func TestAnnotationsReactor(t *testing.T) {
+	testCases := []struct {
+		tags        []fakeTagResponse
+		obj         *kapps.StatefulSet
+		response    *kapps.StatefulSet
+		expected    *kapps.StatefulSet
+		expectedErr bool
+	}{
+		{
+			obj: &kapps.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
+			},
+		},
+
+		{
+			// no container, expect error
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+			}, nil),
+			expectedErr: true,
+		},
+
+		{
+			// container, but path spec is wrong, expect error
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")]",
+				},
+			}, map[string]string{"test": ""}),
+			expectedErr: true,
+		},
+		{
+			// container, but path spec is wrong, expect error
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\").image",
+				},
+			}, map[string]string{"test": ""}),
+			expectedErr: true,
+		},
+		{
+			// container, but path spec is wrong, expect error
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[@.name=test].image",
+				},
+			}, map[string]string{"test": ""}),
+			expectedErr: true,
+		},
+
+		{
+			// no ref, no change
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+			}, map[string]string{"test": ""}),
+		},
+
+		{
+			// resolved without a change in another namespace
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+			}, map[string]string{"test": ""}),
+			response: &kapps.StatefulSet{},
+			expected: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+			}, map[string]string{"test": "image-lookup-1"}),
+		},
+
+		{
+			// resolved for init containers
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.initContainers[?(@.name==\"test\")].image",
+				},
+			}, map[string]string{"-test": ""}),
+			response: &kapps.StatefulSet{},
+			expected: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.initContainers[?(@.name==\"test\")].image",
+				},
+			}, map[string]string{"-test": "image-lookup-1"}),
+		},
+
+		{
+			// will not resolve if the trigger is paused
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					Paused:    true,
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+			}, map[string]string{"test": ""}),
+			response: &kapps.StatefulSet{},
+		},
+
+		{
+			// will fire if only one trigger resolves
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+				{
+					From:      ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image",
+				},
+			}, map[string]string{"test": "", "test2": ""}),
+			response: &kapps.StatefulSet{},
+			expected: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+				{
+					From:      ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image",
+				},
+			}, map[string]string{"test": "image-lookup-1", "test2": ""}),
+		},
+
+		{
+			// will fire if a trigger has already been resolved before
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+				{
+					From:      ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image",
+				},
+			}, map[string]string{"test": "", "test2": "old-image"}),
+			response: &kapps.StatefulSet{},
+			expected: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+				{
+					From:      ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image",
+				},
+			}, map[string]string{"test": "image-lookup-1", "test2": "old-image"}),
+		},
+
+		{
+			// will fire if both triggers are resolved
+			tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}},
+			obj: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image",
+				},
+			}, map[string]string{"test": "", "test2": ""}),
+			response: &kapps.StatefulSet{},
+			expected: testStatefulSet([]ObjectFieldTrigger{
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image",
+				},
+				{
+					From:      ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"},
+					FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image",
+				},
+			}, map[string]string{"test": "image-lookup-1", "test2": "image-lookup-1"}),
+		},
+	}
+
+	for i, test := range testCases {
+		u := &fakeUpdater{}
+		r := AnnotationReactor{Updater: u}
+		initial := test.obj.DeepCopy()
+		err := r.ImageChanged(test.obj, fakeTagRetriever(test.tags))
+		if !equality.Semantic.DeepEqual(initial, test.obj) {
+			t.Errorf("%d: should not have mutated: %s", i, diff.ObjectReflectDiff(initial, test.obj))
+		}
+		switch {
+		case err == nil && test.expectedErr, err != nil && !test.expectedErr:
+			t.Errorf("%d: unexpected error: %v", i, err)
+			continue
+		case err != nil:
+			continue
+		}
+		if test.expected != nil {
+			if u.Object == nil {
+				t.Errorf("%d: no response defined", i)
+				continue
+			}
+			if !reflect.DeepEqual(test.expected, u.Object) {
+				t.Errorf("%d: not equal: %s", i, diff.ObjectReflectDiff(test.expected, u.Object))
+				continue
+			}
+		} else {
+			if u.Object != nil {
+				t.Errorf("%d: unexpected update: %v", i, u.Object)
+				continue
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go
new file mode 100644
index 00000000000..49fa88a02f9
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go
@@ -0,0 +1,7 @@
+package trigger
+
+// TagRetriever returns information about a tag, including whether it exists
+// and the observed resource version of the object at the time the tag was loaded.
+type TagRetriever interface {
+	ImageStreamTag(namespace, name string) (ref string, rv int64, ok bool)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/types.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/types.go
new file mode 100644
index 00000000000..478ca456a18
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/types.go
@@ -0,0 +1,29 @@
+package trigger
+
+// TriggerAnnotationKey is the annotation used on resources to signal they wish to have
+// container image references changed when an image stream tag is updated. Today, only
+// containers can be specified by fieldPath.
+const TriggerAnnotationKey = "image.openshift.io/triggers"
+
+// ObjectFieldTrigger links a field on the current object to another object for mutation.
+type ObjectFieldTrigger struct {
+	// from is the object this should trigger from. The kind and name fields must be set.
+	From ObjectReference `json:"from"`
+	// fieldPath is a JSONPath string to the field to edit on the object. Required.
+	FieldPath string `json:"fieldPath"`
+	// paused is true if this trigger is temporarily disabled. Optional.
+	Paused bool `json:"paused,omitempty"`
+}
+
+// ObjectReference identifies an object by its name and kind.
+type ObjectReference struct {
+	// kind is the referenced object's schema.
+	Kind string `json:"kind"`
+	// name is the name of the object.
+	Name string `json:"name"`
+	// namespace is the namespace the object is located in. Optional if the object is not
+	// namespaced, or if left empty on a namespaced object, means the current namespace.
+	Namespace string `json:"namespace,omitempty"`
+	// apiVersion is the group and version the type exists in. Optional.
+	APIVersion string `json:"apiVersion,omitempty"`
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go b/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go
new file mode 100644
index 00000000000..4a35356534d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go
@@ -0,0 +1,228 @@
+package legacygroupification
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	appsv1 "github.com/openshift/api/apps/v1"
+	authorizationv1 "github.com/openshift/api/authorization/v1"
+	buildv1 "github.com/openshift/api/build/v1"
+	imagev1 "github.com/openshift/api/image/v1"
+	networkv1 "github.com/openshift/api/network/v1"
+	oauthv1 "github.com/openshift/api/oauth/v1"
+	projectv1 "github.com/openshift/api/project/v1"
+	quotav1 "github.com/openshift/api/quota/v1"
+	routev1 "github.com/openshift/api/route/v1"
+	securityv1 "github.com/openshift/api/security/v1"
+	templatev1 "github.com/openshift/api/template/v1"
+	userv1 "github.com/openshift/api/user/v1"
+)
+
+// deprecated
+func IsOAPI(gvk schema.GroupVersionKind) bool {
+	if len(gvk.Group) > 0 {
+		return false
+	}
+
+	_, ok := oapiKindsToGroup[gvk.Kind]
+	return ok
+}
+
+// deprecated
+func OAPIToGroupifiedGVK(gvk *schema.GroupVersionKind) {
+	if len(gvk.Group) > 0 {
+		return
+	}
+
+	newGroup, ok := oapiKindsToGroup[gvk.Kind]
+	if !ok {
+		return
+	}
+	gvk.Group = newGroup
+}
+
+// deprecated
+func OAPIToGroupified(uncast runtime.Object, gvk *schema.GroupVersionKind) {
+	if len(gvk.Group) > 0 {
+		return
+	}
+
+	switch obj := uncast.(type) {
+	case *unstructured.Unstructured:
+		newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object)
+		if len(newGroup) > 0 {
+			gvk.Group = newGroup
+			uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+		}
+	case *unstructured.UnstructuredList:
+		newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object)
+		if len(newGroup) > 0 {
+			gvk.Group = newGroup
+			uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+		}
+
+	case *appsv1.DeploymentConfig, *appsv1.DeploymentConfigList,
+		*appsv1.DeploymentConfigRollback,
+		*appsv1.DeploymentLog,
+		*appsv1.DeploymentRequest:
+		gvk.Group = appsv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *authorizationv1.ClusterRoleBinding, *authorizationv1.ClusterRoleBindingList,
+		*authorizationv1.ClusterRole, *authorizationv1.ClusterRoleList,
+		*authorizationv1.Role, *authorizationv1.RoleList,
+		*authorizationv1.RoleBinding, *authorizationv1.RoleBindingList,
+		*authorizationv1.RoleBindingRestriction, *authorizationv1.RoleBindingRestrictionList,
+		*authorizationv1.SubjectRulesReview, *authorizationv1.SelfSubjectRulesReview,
+		*authorizationv1.ResourceAccessReview, *authorizationv1.LocalResourceAccessReview,
+		*authorizationv1.SubjectAccessReview, *authorizationv1.LocalSubjectAccessReview:
+		gvk.Group = authorizationv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *buildv1.BuildConfig, *buildv1.BuildConfigList,
+		*buildv1.Build, *buildv1.BuildList,
+		*buildv1.BuildLog,
+		*buildv1.BuildRequest,
+		*buildv1.BinaryBuildRequestOptions:
+		gvk.Group = buildv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *imagev1.Image, *imagev1.ImageList,
+		*imagev1.ImageSignature,
+		*imagev1.ImageStreamImage,
+		*imagev1.ImageStreamImport,
+		*imagev1.ImageStreamMapping,
+		*imagev1.ImageStream, *imagev1.ImageStreamList,
+		*imagev1.ImageStreamTag:
+		gvk.Group = imagev1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *networkv1.ClusterNetwork, *networkv1.ClusterNetworkList,
+		*networkv1.NetNamespace, *networkv1.NetNamespaceList,
+		*networkv1.HostSubnet, *networkv1.HostSubnetList,
+		*networkv1.EgressNetworkPolicy, *networkv1.EgressNetworkPolicyList:
+		gvk.Group = networkv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *projectv1.Project, *projectv1.ProjectList,
+		*projectv1.ProjectRequest:
+		gvk.Group = projectv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *quotav1.ClusterResourceQuota, *quotav1.ClusterResourceQuotaList,
+		*quotav1.AppliedClusterResourceQuota, *quotav1.AppliedClusterResourceQuotaList:
+		gvk.Group = quotav1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *oauthv1.OAuthAuthorizeToken, *oauthv1.OAuthAuthorizeTokenList,
+		*oauthv1.OAuthClientAuthorization, *oauthv1.OAuthClientAuthorizationList,
+		*oauthv1.OAuthClient, *oauthv1.OAuthClientList,
+		*oauthv1.OAuthAccessToken, *oauthv1.OAuthAccessTokenList:
+		gvk.Group = oauthv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *routev1.Route, *routev1.RouteList:
+		gvk.Group = routev1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *securityv1.SecurityContextConstraints, *securityv1.SecurityContextConstraintsList,
+		*securityv1.PodSecurityPolicySubjectReview,
+		*securityv1.PodSecurityPolicySelfSubjectReview,
+		*securityv1.PodSecurityPolicyReview:
+		gvk.Group = securityv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *templatev1.Template, *templatev1.TemplateList:
+		gvk.Group = templatev1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	case *userv1.Group, *userv1.GroupList,
+		*userv1.Identity, *userv1.IdentityList,
+		*userv1.UserIdentityMapping,
+		*userv1.User, *userv1.UserList:
+		gvk.Group = userv1.GroupName
+		uncast.GetObjectKind().SetGroupVersionKind(*gvk)
+
+	}
+}
+
+var oapiKindsToGroup = map[string]string{
+	"DeploymentConfigRollback": "apps.openshift.io",
+	"DeploymentConfig":         "apps.openshift.io", "DeploymentConfigList": "apps.openshift.io",
+	"DeploymentLog":     "apps.openshift.io",
+	"DeploymentRequest": "apps.openshift.io",
+	"ClusterRoleBinding": "authorization.openshift.io", "ClusterRoleBindingList": "authorization.openshift.io",
+	"ClusterRole": "authorization.openshift.io", "ClusterRoleList": "authorization.openshift.io",
+	"RoleBindingRestriction": "authorization.openshift.io", "RoleBindingRestrictionList": "authorization.openshift.io",
+	"RoleBinding": "authorization.openshift.io", "RoleBindingList": "authorization.openshift.io",
+	"Role": "authorization.openshift.io", "RoleList": "authorization.openshift.io",
+	"SubjectRulesReview": "authorization.openshift.io", "SelfSubjectRulesReview": "authorization.openshift.io",
+	"ResourceAccessReview": "authorization.openshift.io", "LocalResourceAccessReview": "authorization.openshift.io",
+	"SubjectAccessReview": "authorization.openshift.io", "LocalSubjectAccessReview": "authorization.openshift.io",
+	"BuildConfig": "build.openshift.io", "BuildConfigList": "build.openshift.io",
+	"Build": "build.openshift.io", "BuildList": "build.openshift.io",
+	"BinaryBuildRequestOptions": "build.openshift.io",
+	"BuildLog":                  "build.openshift.io",
+	"BuildRequest":              "build.openshift.io",
+	"Image": "image.openshift.io", "ImageList": "image.openshift.io",
+	"ImageSignature":     "image.openshift.io",
+	"ImageStreamImage":   "image.openshift.io",
+	"ImageStreamImport":  "image.openshift.io",
+	"ImageStreamMapping": "image.openshift.io",
+	"ImageStream": "image.openshift.io", "ImageStreamList": "image.openshift.io",
+	"ImageStreamTag": "image.openshift.io", "ImageStreamTagList": "image.openshift.io",
+	"ClusterNetwork": "network.openshift.io", "ClusterNetworkList": "network.openshift.io",
+	"EgressNetworkPolicy": "network.openshift.io", "EgressNetworkPolicyList": "network.openshift.io",
+	"HostSubnet": "network.openshift.io", "HostSubnetList": "network.openshift.io",
+	"NetNamespace": "network.openshift.io", "NetNamespaceList": "network.openshift.io",
+	"OAuthAccessToken": "oauth.openshift.io", "OAuthAccessTokenList": "oauth.openshift.io",
+	"OAuthAuthorizeToken": "oauth.openshift.io", "OAuthAuthorizeTokenList": "oauth.openshift.io",
+	"OAuthClientAuthorization": "oauth.openshift.io", "OAuthClientAuthorizationList": "oauth.openshift.io",
+	"OAuthClient": "oauth.openshift.io", "OAuthClientList": "oauth.openshift.io",
+	"Project": "project.openshift.io", "ProjectList": "project.openshift.io",
+	"ProjectRequest": "project.openshift.io",
+	"ClusterResourceQuota": "quota.openshift.io", "ClusterResourceQuotaList": "quota.openshift.io",
+	"AppliedClusterResourceQuota": "quota.openshift.io", "AppliedClusterResourceQuotaList": "quota.openshift.io",
+	"Route": "route.openshift.io", "RouteList": "route.openshift.io",
+	"SecurityContextConstraints": "security.openshift.io", "SecurityContextConstraintsList": "security.openshift.io",
+	"PodSecurityPolicySubjectReview":     "security.openshift.io",
+	"PodSecurityPolicySelfSubjectReview": "security.openshift.io",
+	"PodSecurityPolicyReview":            "security.openshift.io",
+	"Template": "template.openshift.io", "TemplateList": "template.openshift.io",
+	"Group": "user.openshift.io", "GroupList": "user.openshift.io",
+	"Identity": "user.openshift.io", "IdentityList": "user.openshift.io",
+	"UserIdentityMapping": "user.openshift.io",
+	"User": "user.openshift.io", "UserList": "user.openshift.io",
+}
+
+func fixOAPIGroupKindInTopLevelUnstructured(obj map[string]interface{}) string {
+	kind, ok := obj["kind"]
+	if !ok {
+		return ""
+	}
+	kindStr, ok := kind.(string)
+	if !ok {
+		return ""
+	}
+	newGroup, ok := oapiKindsToGroup[kindStr]
+	if !ok {
+		return ""
+	}
+
+	apiVersion, ok := obj["apiVersion"]
+	if !ok {
+		return newGroup
+	}
+	apiVersionStr, ok := apiVersion.(string)
+	if !ok {
+		return newGroup
+	}
+
+	if apiVersionStr != "v1" {
+		return newGroup
+	}
+	obj["apiVersion"] = newGroup + "/v1"
+
+	return newGroup
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go b/vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go
new file mode 100644
index 00000000000..87f4cad5b8c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/network/networkapihelpers/annotations.go
@@ -0,0 +1,63 @@
+package networkapihelpers
+
+import (
+	"fmt"
+	"strings"
+
+	networkv1 "github.com/openshift/api/network/v1"
+)
+
+type PodNetworkAction string
+
+const (
+	// Acceptable values for ChangePodNetworkAnnotation
+	GlobalPodNetwork  PodNetworkAction = "global"
+	JoinPodNetwork    PodNetworkAction = "join"
+	IsolatePodNetwork PodNetworkAction = "isolate"
+)
+
+var (
+	ErrorPodNetworkAnnotationNotFound = fmt.Errorf("ChangePodNetworkAnnotation not found")
+)
+
+// GetChangePodNetworkAnnotation fetches network change intent from NetNamespace
+func GetChangePodNetworkAnnotation(netns *networkv1.NetNamespace) (PodNetworkAction, string, error) {
+	value, ok := netns.Annotations[networkv1.ChangePodNetworkAnnotation]
+	if !ok {
+		return PodNetworkAction(""), "", ErrorPodNetworkAnnotationNotFound
+	}
+
+	args := strings.Split(value, ":")
+	switch PodNetworkAction(args[0]) {
+	case GlobalPodNetwork:
+		return GlobalPodNetwork, "", nil
+	case JoinPodNetwork:
+		if len(args) != 2 {
+			return PodNetworkAction(""), "", fmt.Errorf("invalid namespace for join pod network: %s", value)
+		}
+		namespace := args[1]
+		return JoinPodNetwork, namespace, nil
+	case IsolatePodNetwork:
+		return IsolatePodNetwork, "", nil
+	}
+
+	return PodNetworkAction(""), "", fmt.Errorf("invalid ChangePodNetworkAnnotation: %s", value)
+}
+
+// SetChangePodNetworkAnnotation sets network change intent on NetNamespace
+func SetChangePodNetworkAnnotation(netns *networkv1.NetNamespace, action PodNetworkAction, params string) {
+	if netns.Annotations == nil {
+		netns.Annotations = make(map[string]string)
+	}
+
+	value := string(action)
+	if len(params) != 0 {
+		value = fmt.Sprintf("%s:%s", value, params)
+	}
+	netns.Annotations[networkv1.ChangePodNetworkAnnotation] = value
+}
+
+// DeleteChangePodNetworkAnnotation removes network change intent from NetNamespace
+func DeleteChangePodNetworkAnnotation(netns *networkv1.NetNamespace) {
+	delete(netns.Annotations, networkv1.ChangePodNetworkAnnotation)
+}
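The annotation round-trip these helpers implement, sketched below; the namespace name is illustrative. Set stores the action (joined with ":" and the parameter when one is given), Get parses it back, and Delete clears the intent:

    package example

    import (
    	networkv1 "github.com/openshift/api/network/v1"

    	"github.com/openshift/library-go/pkg/network/networkapihelpers"
    )

    func joinThenClear(netns *networkv1.NetNamespace) error {
    	// Stores the value "join:other" under networkv1.ChangePodNetworkAnnotation.
    	networkapihelpers.SetChangePodNetworkAnnotation(netns, networkapihelpers.JoinPodNetwork, "other")
    	action, ns, err := networkapihelpers.GetChangePodNetworkAnnotation(netns)
    	if err != nil {
    		return err
    	}
    	_, _ = action, ns // JoinPodNetwork, "other"
    	networkapihelpers.DeleteChangePodNetworkAnnotation(netns)
    	return nil
    }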
diff --git a/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go
new file mode 100644
index 00000000000..b266db54eb3
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go
@@ -0,0 +1,65 @@
+package networkutils
+
+import (
+	"fmt"
+	"net"
+)
+
+const (
+	SingleTenantPluginName  = "redhat/openshift-ovs-subnet"
+	MultiTenantPluginName   = "redhat/openshift-ovs-multitenant"
+	NetworkPolicyPluginName = "redhat/openshift-ovs-networkpolicy"
+)
+
+var localHosts []string = []string{"127.0.0.1", "::1", "localhost"}
+var localSubnets []string = []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7", "fe80::/10"}
+
+// IsPrivateAddress returns true if given address in format "<host>[:<port>]" is a localhost or an IP from
+// private network range (e.g. 172.30.0.1, 192.168.0.1).
+func IsPrivateAddress(addr string) bool {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		// assume addr is of the form `host` without the port and go on.
+		host = addr
+	}
+	for _, localHost := range localHosts {
+		if host == localHost {
+			return true
+		}
+	}
+
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return false
+	}
+
+	for _, subnet := range localSubnets {
+		ipnet, err := ParseCIDRMask(subnet)
+		if err != nil {
+			continue // should not happen
+		}
+		if ipnet.Contains(ip) {
+			return true
+		}
+	}
+	return false
+}
+
+// ParseCIDRMask parses a CIDR string and ensures that it has no bits set beyond the
+// network mask length. Use this when the input is supposed to be either a description of
+// a subnet (eg, "192.168.1.0/24", meaning "192.168.1.0 to 192.168.1.255"), or a mask for
+// matching against (eg, "192.168.1.15/32", meaning "must match all 32 bits of the address
+// "192.168.1.15"). Use net.ParseCIDR() when the input is a host address that also
+// describes the subnet that it is on (eg, "192.168.1.15/24", meaning "the address
+// 192.168.1.15 on the network 192.168.1.0/24").
+func ParseCIDRMask(cidr string) (*net.IPNet, error) {
+	ip, net, err := net.ParseCIDR(cidr)
+	if err != nil {
+		return nil, err
+	}
+	if !ip.Equal(net.IP) {
+		maskLen, addrLen := net.Mask.Size()
+		return nil, fmt.Errorf("CIDR network specification %q is not in canonical form (should be %s/%d or %s/%d?)", cidr, ip.Mask(net.Mask).String(), maskLen, ip.String(), addrLen)
+	}
+	return net, nil
+}
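The distinction ParseCIDRMask's doc comment draws, shown concretely (the tests that follow exercise the same cases):

    package example

    import "github.com/openshift/library-go/pkg/network/networkutils"

    func demo() {
    	// Canonical subnet: accepted.
    	if _, err := networkutils.ParseCIDRMask("192.168.1.0/24"); err != nil {
    		panic(err) // not reached
    	}
    	// Host bits set beyond the /24 mask: rejected; the error suggests both
    	// plausible corrections, 192.168.1.0/24 and 192.168.1.15/32.
    	if _, err := networkutils.ParseCIDRMask("192.168.1.15/24"); err == nil {
    		panic("expected an error") // not reached
    	}
    }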
diff --git a/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go
new file mode 100644
index 00000000000..4124d208704
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go
@@ -0,0 +1,100 @@
+package networkutils
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestParseCIDRMask(t *testing.T) {
+	tests := []struct {
+		cidr       string
+		fixedShort string
+		fixedLong  string
+	}{
+		{
+			cidr: "192.168.0.0/16",
+		},
+		{
+			cidr: "192.168.1.0/24",
+		},
+		{
+			cidr: "192.168.1.1/32",
+		},
+		{
+			cidr:       "192.168.1.0/16",
+			fixedShort: "192.168.0.0/16",
+			fixedLong:  "192.168.1.0/32",
+		},
+		{
+			cidr:       "192.168.1.1/24",
+			fixedShort: "192.168.1.0/24",
+			fixedLong:  "192.168.1.1/32",
+		},
+	}
+
+	for _, test := range tests {
+		_, err := ParseCIDRMask(test.cidr)
+		if test.fixedShort == "" && test.fixedLong == "" {
+			if err != nil {
+				t.Fatalf("unexpected error parsing CIDR mask %q: %v", test.cidr, err)
+			}
+		} else {
+			if err == nil {
+				t.Fatalf("unexpected lack of error parsing CIDR mask %q", test.cidr)
+			}
+			if !strings.Contains(err.Error(), test.fixedShort) {
+				t.Fatalf("error does not contain expected string %q: %v", test.fixedShort, err)
+			}
+			if !strings.Contains(err.Error(), test.fixedLong) {
+				t.Fatalf("error does not contain expected string %q: %v", test.fixedLong, err)
+			}
+		}
+	}
+}
+
+func TestIsPrivateAddress(t *testing.T) {
+	for _, tc := range []struct {
+		address string
+		isLocal bool
+	}{
+		{"localhost", true},
+		{"example.com", false},
+		{"registry.localhost", false},
+
+		{"9.255.255.255", false},
+		{"10.0.0.1", true},
+		{"10.1.255.255", true},
+		{"10.255.255.255", true},
+		{"11.0.0.1", false},
+
+		{"127.0.0.1", true},
+
+		{"172.15.255.253", false},
+		{"172.16.0.1", true},
+		{"172.30.0.1", true},
+		{"172.31.255.255", true},
+		{"172.32.0.1", false},
+
+		{"192.167.122.1", false},
+		{"192.168.0.1", true},
+		{"192.168.122.1", true},
+		{"192.168.255.255", true},
+		{"192.169.1.1", false},
+
+		{"::1", true},
+
+		{"fe00::1", false},
+		{"fd12:3456:789a:1::1", true},
+		{"fe82:3456:789a:1::1", true},
+		{"ff00::1", false},
+	} {
+		res := IsPrivateAddress(tc.address)
+		if tc.isLocal && !res {
+			t.Errorf("address %q considered not local", tc.address)
+			continue
+		}
+		if !tc.isLocal && res {
+			t.Errorf("address %q considered local", tc.address)
+		}
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go
new file mode 100644
index 00000000000..713a404208f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go
@@ -0,0 +1,32 @@
+package oauthdiscovery
+
+// OauthAuthorizationServerMetadata holds OAuth 2.0 Authorization Server Metadata used for discovery
+// https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+type OauthAuthorizationServerMetadata struct {
+	// The authorization server's issuer identifier, which is a URL that uses the https scheme and has no query or fragment components.
+	// This is the location where .well-known RFC 5785 [RFC5785] resources containing information about the authorization server are published.
+	Issuer string `json:"issuer"`
+
+	// URL of the authorization server's authorization endpoint [RFC6749].
+	AuthorizationEndpoint string `json:"authorization_endpoint"`
+
+	// URL of the authorization server's token endpoint [RFC6749].
+	TokenEndpoint string `json:"token_endpoint"`
+
+	// JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this authorization server supports.
+	// Servers MAY choose not to advertise some supported scope values even when this parameter is used.
+	ScopesSupported []string `json:"scopes_supported"`
+
+	// JSON array containing a list of the OAuth 2.0 response_type values that this authorization server supports.
+	// The array values used are the same as those used with the response_types parameter defined by "OAuth 2.0 Dynamic Client Registration Protocol" [RFC7591].
+	ResponseTypesSupported []string `json:"response_types_supported"`
+
+	// JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports.
+	// The array values used are the same as those used with the grant_types parameter defined by "OAuth 2.0 Dynamic Client Registration Protocol" [RFC7591].
+	GrantTypesSupported []string `json:"grant_types_supported"`
+
+	// JSON array containing a list of PKCE [RFC7636] code challenge methods supported by this authorization server.
+	// Code challenge method values are used in the "code_challenge_method" parameter defined in Section 4.3 of [RFC7636].
+	// The valid code challenge method values are those registered in the IANA "PKCE Code Challenge Methods" registry [IANA.OAuth.Parameters].
+	CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"`
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go
new file mode 100644
index 00000000000..2539d4a3916
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go
@@ -0,0 +1,37 @@
+package oauthdiscovery
+
+import (
+	"path"
+	"strings"
+)
+
+const (
+	AuthorizePath = "/authorize"
+	TokenPath     = "/token"
+	InfoPath      = "/info"
+
+	RequestTokenEndpoint  = "/token/request"
+	DisplayTokenEndpoint  = "/token/display"
+	ImplicitTokenEndpoint = "/token/implicit"
+)
+
+const OpenShiftOAuthAPIPrefix = "/oauth"
+
+func OpenShiftOAuthAuthorizeURL(masterAddr string) string {
+	return openShiftOAuthURL(masterAddr, AuthorizePath)
+}
+func OpenShiftOAuthTokenURL(masterAddr string) string {
+	return openShiftOAuthURL(masterAddr, TokenPath)
+}
+func OpenShiftOAuthTokenRequestURL(masterAddr string) string {
+	return openShiftOAuthURL(masterAddr, RequestTokenEndpoint)
+}
+func OpenShiftOAuthTokenDisplayURL(masterAddr string) string {
+	return openShiftOAuthURL(masterAddr, DisplayTokenEndpoint)
+}
+func OpenShiftOAuthTokenImplicitURL(masterAddr string) string {
+	return openShiftOAuthURL(masterAddr, ImplicitTokenEndpoint)
+}
+func openShiftOAuthURL(masterAddr, oauthEndpoint string) string {
+	return strings.TrimRight(masterAddr, "/") + path.Join(OpenShiftOAuthAPIPrefix, oauthEndpoint)
+}
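Since openShiftOAuthURL trims the master address's trailing slashes and joins the rest with path.Join, the helpers are safe to call with or without a trailing "/". A worked example; the host is illustrative:

    package example

    import "github.com/openshift/library-go/pkg/oauth/oauthdiscovery"

    func authorizeURL() string {
    	// path.Join collapses duplicate slashes, so a trailing "/" on the
    	// master address is harmless.
    	return oauthdiscovery.OpenShiftOAuthAuthorizeURL("https://api.example.com:6443/")
    	// => "https://api.example.com:6443/oauth/authorize"
    }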
b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry.go @@ -0,0 +1,501 @@ +package oauthserviceaccountclient + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" + + "github.com/openshift/library-go/pkg/authorization/scopemetadata" + + clientv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" + kcoreclient "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + + oauthv1 "github.com/openshift/api/oauth/v1" + routev1 "github.com/openshift/api/route/v1" + routev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" +) + +const ( + OAuthWantChallengesAnnotationPrefix = "serviceaccounts.openshift.io/oauth-want-challenges" + + // Prefix used for statically specifying redirect URIs for a service account via annotations + // The value can be partially supplied with the dynamic prefix to override the resource's defaults + OAuthRedirectModelAnnotationURIPrefix = "serviceaccounts.openshift.io/oauth-redirecturi." + + // Prefix used for dynamically specifying redirect URIs using resources for a service account via annotations + OAuthRedirectModelAnnotationReferencePrefix = "serviceaccounts.openshift.io/oauth-redirectreference." + + routeKind = "Route" + // TODO add ingress support + // IngressKind = "Ingress" +) + +var ( + modelPrefixes = []string{ + OAuthRedirectModelAnnotationURIPrefix, + OAuthRedirectModelAnnotationReferencePrefix, + } + + emptyGroupKind = schema.GroupKind{} // Used with static redirect URIs + routeGroupKind = schema.GroupKind{Group: "route.openshift.io", Kind: routeKind} + legacyRouteGroupKind = schema.GroupKind{Group: "", Kind: routeKind} // to support redirect reference with old group + + scheme = runtime.NewScheme() + codecFactory = serializer.NewCodecFactory(scheme) +) + +func init() { + oauthv1.Install(scheme) + oauthv1.DeprecatedInstallWithoutGroup(scheme) +} + +// namesToObjMapperFunc is linked to a given GroupKind. +// Based on the namespace and names provided, it builds a map of resource name to redirect URIs. +// The redirect URIs represent the default values as specified by the resource. +// These values can be overridden by user specified data. Errors returned are informative and non-fatal. +type namesToObjMapperFunc func(namespace string, names sets.String) (map[string]redirectURIList, []error) + +// TODO add ingress support +// var ingressGroupKind = routeapi.SchemeGroupVersion.WithKind(IngressKind).GroupKind() + +// OAuthClientGetter exposes a way to get a specific client. This is useful for other registries to get scope limitations +// on particular clients. This interface will make its easier to write a future cache on it +type OAuthClientGetter interface { + Get(name string, options metav1.GetOptions) (*oauthv1.OAuthClient, error) +} + +type saOAuthClientAdapter struct { + saClient kcoreclient.ServiceAccountsGetter + secretClient kcoreclient.SecretsGetter + eventRecorder record.EventRecorder + routeClient routev1client.RoutesGetter + // TODO add ingress support + //ingressClient ?? 
+ + delegate OAuthClientGetter + grantMethod oauthv1.GrantHandlerType + + decoder runtime.Decoder +} + +// model holds fields that could be used to build redirect URI(s). +// The resource components define where to get the default redirect data from. +// If specified, the uri components are used to override the default data. +// As long as the resulting URI(s) have a scheme and a host, they are considered valid. +type model struct { + scheme string + port string + path string + host string + + group string + kind string + name string +} + +// getGroupKind is used to determine if a group and kind combination is supported. +func (m *model) getGroupKind() schema.GroupKind { + return schema.GroupKind{Group: m.group, Kind: m.kind} +} + +// updateFromURI updates the data in the model with the user provided URL data. +func (m *model) updateFromURI(u *url.URL) { + m.scheme, m.host, m.path = u.Scheme, u.Host, u.Path + if h, p, err := net.SplitHostPort(m.host); err == nil { + m.host = h + m.port = p + } +} + +// updateFromReference updates the data in the model with the user provided object reference data. +func (m *model) updateFromReference(r *oauthv1.RedirectReference) { + m.group, m.kind, m.name = r.Group, r.Kind, r.Name +} + +type modelList []model + +// getNames determines the unique, non-empty resource names specified by the models. +func (ml modelList) getNames() sets.String { + data := sets.NewString() + for _, model := range ml { + if len(model.name) > 0 { + data.Insert(model.name) + } + } + return data +} + +// getRedirectURIs uses the mapping provided by a namesToObjMapperFunc to enumerate all of the redirect URIs +// based on the name of each resource. The user provided data in the model overrides the data in the mapping. +// The returned redirect URIs may contain duplicate and invalid entries. All items in the modelList must have a +// uniform group/kind, and the objMapper must be specifically for that group/kind. +func (ml modelList) getRedirectURIs(objMapper map[string]redirectURIList) redirectURIList { + var data redirectURIList + for _, m := range ml { + if uris, ok := objMapper[m.name]; ok { + for _, uri := range uris { + u := uri // Make sure we do not mutate objMapper + u.merge(&m) + data = append(data, u) + } + } + } + return data +} + +type redirectURI struct { + scheme string + host string + port string + path string +} + +func (uri *redirectURI) String() string { + host := uri.host + if len(uri.port) > 0 { + host = net.JoinHostPort(host, uri.port) + } + return (&url.URL{Scheme: uri.scheme, Host: host, Path: uri.path}).String() +} + +// isValid returns true when both scheme and host are non-empty. +func (uri *redirectURI) isValid() bool { + return len(uri.scheme) > 0 && len(uri.host) > 0 +} + +type redirectURIList []redirectURI + +// extractValidRedirectURIStrings returns the redirect URIs that are valid per `isValid` as strings. +func (rl redirectURIList) extractValidRedirectURIStrings() []string { + var data []string + for _, u := range rl { + if u.isValid() { + data = append(data, u.String()) + } + } + return data +} + +// merge overrides the default data in the uri with the user provided data in the model. 
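// An illustrative sketch (values hypothetical): merging
//
//	uri := redirectURI{scheme: "http", host: "a.com", port: "8080", path: "/"}
//	m := model{scheme: "https", path: "/cb"}
//	uri.merge(&m)
//
// yields redirectURI{scheme: "https", host: "a.com", port: "8080", path: "/cb"};
// empty model fields leave the corresponding uri fields untouched.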
+func (uri *redirectURI) merge(m *model) { + if len(m.scheme) > 0 { + uri.scheme = m.scheme + } + if len(m.path) > 0 { + uri.path = m.path + } + if len(m.port) > 0 { + uri.port = m.port + } + if len(m.host) > 0 { + uri.host = m.host + } +} + +var _ OAuthClientGetter = &saOAuthClientAdapter{} + +func NewServiceAccountOAuthClientGetter( + saClient kcoreclient.ServiceAccountsGetter, + secretClient kcoreclient.SecretsGetter, + eventClient kcoreclient.EventInterface, + routeClient routev1client.RoutesGetter, + delegate OAuthClientGetter, + grantMethod oauthv1.GrantHandlerType, +) OAuthClientGetter { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&kcoreclient.EventSinkImpl{Interface: eventClient}) + recorder := eventBroadcaster.NewRecorder(scheme, clientv1.EventSource{Component: "service-account-oauth-client-getter"}) + return &saOAuthClientAdapter{ + saClient: saClient, + secretClient: secretClient, + eventRecorder: recorder, + routeClient: routeClient, + delegate: delegate, + grantMethod: grantMethod, + decoder: codecFactory.UniversalDecoder(), + } +} + +func (a *saOAuthClientAdapter) Get(name string, options metav1.GetOptions) (*oauthv1.OAuthClient, error) { + var err error + saNamespace, saName, err := apiserverserviceaccount.SplitUsername(name) + if err != nil { + return a.delegate.Get(name, options) + } + + sa, err := a.saClient.ServiceAccounts(saNamespace).Get(saName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + var saErrors []error + var failReason string + // Create a warning event combining the collected annotation errors upon failure. + defer func() { + if err != nil && len(saErrors) > 0 && len(failReason) > 0 { + a.eventRecorder.Event(sa, corev1.EventTypeWarning, failReason, utilerrors.NewAggregate(saErrors).Error()) + } + }() + + redirectURIs := []string{} + modelsMap, errs := parseModelsMap(sa.Annotations, a.decoder) + if len(errs) > 0 { + saErrors = append(saErrors, errs...) + } + + if len(modelsMap) > 0 { + uris, extractErrors := a.extractRedirectURIs(modelsMap, saNamespace) + if len(uris) > 0 { + redirectURIs = append(redirectURIs, uris.extractValidRedirectURIStrings()...) + } + if len(extractErrors) > 0 { + saErrors = append(saErrors, extractErrors...) + } + } + if len(redirectURIs) == 0 { + err = fmt.Errorf("%v has no redirectURIs; set %v= or create a dynamic URI using %v=", + name, OAuthRedirectModelAnnotationURIPrefix, OAuthRedirectModelAnnotationReferencePrefix, + ) + failReason = "NoSAOAuthRedirectURIs" + saErrors = append(saErrors, err) + return nil, err + } + + tokens, err := a.getServiceAccountTokens(sa) + if err != nil { + return nil, err + } + if len(tokens) == 0 { + err = fmt.Errorf("%v has no tokens", name) + failReason = "NoSAOAuthTokens" + saErrors = append(saErrors, err) + return nil, err + } + + saWantsChallenges, _ := strconv.ParseBool(sa.Annotations[OAuthWantChallengesAnnotationPrefix]) + + saClient := &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + ScopeRestrictions: getScopeRestrictionsFor(saNamespace, saName), + AdditionalSecrets: tokens, + RespondWithChallenges: saWantsChallenges, + + // TODO update this to allow https redirection to any + // 1. service IP (useless in general) + // 2. service DNS (useless in general) + // 3. loopback? (useful, but maybe a bit weird) + RedirectURIs: sets.NewString(redirectURIs...).List(), + GrantMethod: a.grantMethod, + } + return saClient, nil +} + +// parseModelsMap builds a map of model name to model using a service account's annotations. 
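// For example (the suffix "one" is chosen for illustration),
// "serviceaccounts.openshift.io/oauth-redirecturi.one" and
// "serviceaccounts.openshift.io/oauth-redirectreference.one" both feed the
// single model keyed "one".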
+// The model name is only used for building the map (it ties together the uri and reference annotations) +// and serves no functional purpose other than making testing easier. Errors returned are informative and non-fatal. +func parseModelsMap(annotations map[string]string, decoder runtime.Decoder) (map[string]model, []error) { + models := map[string]model{} + parseErrors := []error{} + for key, value := range annotations { + prefix, name, ok := parseModelPrefixName(key) + if !ok { + continue + } + m := models[name] + switch prefix { + case OAuthRedirectModelAnnotationURIPrefix: + if u, err := url.Parse(value); err == nil { + m.updateFromURI(u) + } else { + parseErrors = append(parseErrors, err) + } + case OAuthRedirectModelAnnotationReferencePrefix: + r := &oauthv1.OAuthRedirectReference{} + if err := runtime.DecodeInto(decoder, []byte(value), r); err == nil { + m.updateFromReference(&r.Reference) + } else { + parseErrors = append(parseErrors, err) + } + } + models[name] = m + } + return models, parseErrors +} + +// parseModelPrefixName determines if the given key is a model prefix. +// Returns what prefix was used, the name of the model, and true if a model prefix was actually used. +func parseModelPrefixName(key string) (string, string, bool) { + for _, prefix := range modelPrefixes { + if strings.HasPrefix(key, prefix) { + return prefix, key[len(prefix):], true + } + } + return "", "", false +} + +// extractRedirectURIs builds redirect URIs using the given models and namespace. +// The returned redirect URIs may contain duplicates and invalid entries. Errors returned are informative and non-fatal. +func (a *saOAuthClientAdapter) extractRedirectURIs(modelsMap map[string]model, namespace string) (redirectURIList, []error) { + var data redirectURIList + routeErrors := []error{} + groupKindModelListMapper := map[schema.GroupKind]modelList{} // map of GroupKind to all models belonging to it + groupKindModelToURI := map[schema.GroupKind]namesToObjMapperFunc{ + routeGroupKind: a.redirectURIsFromRoutes, + // TODO add support for ingresses by creating the appropriate GroupKind and namesToObjMapperFunc + // ingressGroupKind: a.redirectURIsFromIngresses, + } + + for _, m := range modelsMap { + gk := m.getGroupKind() + if gk == legacyRouteGroupKind { + gk = routeGroupKind // support legacy route group without doing extra API calls + } + if len(m.name) == 0 && gk == emptyGroupKind { // Is this a static redirect URI? + uri := redirectURI{} // No defaults wanted + uri.merge(&m) + data = append(data, uri) + } else if _, ok := groupKindModelToURI[gk]; ok { // a GroupKind is valid if we have a namesToObjMapperFunc to handle it + groupKindModelListMapper[gk] = append(groupKindModelListMapper[gk], m) + } + } + + for gk, models := range groupKindModelListMapper { + if names := models.getNames(); names.Len() > 0 { + objMapper, errs := groupKindModelToURI[gk](namespace, names) + if len(objMapper) > 0 { + data = append(data, models.getRedirectURIs(objMapper)...) + } + if len(errs) > 0 { + routeErrors = append(routeErrors, errs...) + } + } + } + + return data, routeErrors +} + +// redirectURIsFromRoutes is the namesToObjMapperFunc specific to Routes. +// Returns a map of route name to redirect URIs that contain the default data as specified by the route's ingresses. +// Errors returned are informative and non-fatal. 
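// For example (illustrative): a route named "route1" admitted at hosts a.com
// and b.com, with spec.path "/p" and TLS configured, maps to
//
//	map[string]redirectURIList{"route1": {
//		{scheme: "https", host: "a.com", path: "/p"},
//		{scheme: "https", host: "b.com", path: "/p"},
//	}}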
+func (a *saOAuthClientAdapter) redirectURIsFromRoutes(namespace string, osRouteNames sets.String) (map[string]redirectURIList, []error) { + var routes []routev1.Route + routeErrors := []error{} + routeInterface := a.routeClient.Routes(namespace) + if osRouteNames.Len() > 1 { + if r, err := routeInterface.List(metav1.ListOptions{}); err == nil { + routes = r.Items + } else { + routeErrors = append(routeErrors, err) + } + } else { + if r, err := routeInterface.Get(osRouteNames.List()[0], metav1.GetOptions{}); err == nil { + routes = append(routes, *r) + } else { + routeErrors = append(routeErrors, err) + } + } + routeMap := map[string]redirectURIList{} + for _, route := range routes { + if osRouteNames.Has(route.Name) { + routeMap[route.Name] = redirectURIsFromRoute(&route) + } + } + return routeMap, routeErrors +} + +// redirectURIsFromRoute returns a list of redirect URIs that contain the default data as specified by the given route's ingresses. +func redirectURIsFromRoute(route *routev1.Route) redirectURIList { + var uris redirectURIList + uri := redirectURI{scheme: "https"} // Default to TLS + uri.path = route.Spec.Path + if route.Spec.TLS == nil { + uri.scheme = "http" + } + for _, ingress := range route.Status.Ingress { + if !isRouteIngressValid(&ingress) { + continue + } + u := uri // Copy to avoid mutating the base uri + u.host = ingress.Host + uris = append(uris, u) + } + // If we get this far we know the Route does actually exist, so we need to have at least one uri + // to allow the user to override it in their annotations in case there is no valid ingress. + // `extractValidRedirectURIStrings` guarantees that we eventually have the minimum set of required fields. + if len(uris) == 0 { + uris = append(uris, uri) + } + return uris +} + +// isRouteIngressValid determines whether the RouteIngress has a host and whether its conditions contain an element with Type=RouteAdmitted and Status=ConditionTrue +func isRouteIngressValid(routeIngress *routev1.RouteIngress) bool { + if len(routeIngress.Host) == 0 { + return false + } + for _, condition := range routeIngress.Conditions { + if condition.Type == routev1.RouteAdmitted && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func getScopeRestrictionsFor(namespace, name string) []oauthv1.ScopeRestriction { + return []oauthv1.ScopeRestriction{ + {ExactValues: []string{ + scopemetadata.UserInfo, + scopemetadata.UserAccessCheck, + scopemetadata.UserListScopedProjects, + scopemetadata.UserListAllProjects, + }}, + {ClusterRole: &oauthv1.ClusterRoleScopeRestriction{RoleNames: []string{"*"}, Namespaces: []string{namespace}, AllowEscalation: true}}, + } +} + +// getServiceAccountTokens returns all ServiceAccountToken secrets for the given ServiceAccount +func (a *saOAuthClientAdapter) getServiceAccountTokens(sa *corev1.ServiceAccount) ([]string, error) { + allSecrets, err := a.secretClient.Secrets(sa.Namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + tokens := []string{} + for i := range allSecrets.Items { + secret := &allSecrets.Items[i] + if IsServiceAccountToken(secret, sa) { + tokens = append(tokens, string(secret.Data[corev1.ServiceAccountTokenKey])) + } + } + return tokens, nil +} + +// IsServiceAccountToken returns true if the secret is a valid API token for the service account +func IsServiceAccountToken(secret *corev1.Secret, sa *corev1.ServiceAccount) bool { + if secret.Type != corev1.SecretTypeServiceAccountToken { + return false + } + + name := 
secret.Annotations[corev1.ServiceAccountNameKey] + uid := secret.Annotations[corev1.ServiceAccountUIDKey] + if name != sa.Name { + // Name must match + return false + } + if len(uid) > 0 && uid != string(sa.UID) { + // If UID is specified, it must match + return false + } + + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go new file mode 100644 index 00000000000..8402b755e52 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go @@ -0,0 +1,1261 @@ +package oauthserviceaccountclient + +import ( + "reflect" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/record" + + oauthv1 "github.com/openshift/api/oauth/v1" + routev1 "github.com/openshift/api/route/v1" + routev1fake "github.com/openshift/client-go/route/clientset/versioned/fake" +) + +var ( + encoder = codecFactory.LegacyCodec(oauthv1.SchemeGroupVersion) + decoder = codecFactory.UniversalDecoder() + serviceAccountsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} + secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + secretKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} + routesResource = schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"} + routeClientKind = schema.GroupVersionKind{Group: "route.openshift.io", Version: "v1", Kind: "Route"} +) + +func TestGetClient(t *testing.T) { + testCases := []struct { + name string + clientName string + kubeClient *fake.Clientset + routeClient *routev1fake.Clientset + + expectedDelegation bool + expectedErr string + expectedEventMsg string + expectedClient *oauthv1.OAuthClient + expectedKubeActions []clientgotesting.Action + expectedOSActions []clientgotesting.Action + }{ + { + name: "delegate", + clientName: "not:serviceaccount", + kubeClient: fake.NewSimpleClientset(), + routeClient: routev1fake.NewSimpleClientset(), + expectedDelegation: true, + expectedKubeActions: []clientgotesting.Action{}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "missing sa", + clientName: "system:serviceaccount:ns-01:missing-sa", + kubeClient: fake.NewSimpleClientset(), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `serviceaccounts "missing-sa" not found`, + expectedKubeActions: []clientgotesting.Action{clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "missing-sa")}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "sa no redirects", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{}, + }, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.`, + expectedEventMsg: `Warning NoSAOAuthRedirectURIs 
system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=`, + + //expectedEventMsg: `Warning NoSAOAuthRedirectURIs [parse ::: missing protocol scheme, system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=]`, + expectedKubeActions: []clientgotesting.Action{clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default")}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "sa invalid redirect scheme", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{OAuthRedirectModelAnnotationURIPrefix + "incomplete": "::"}, + }, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.`, + expectedEventMsg: `Warning NoSAOAuthRedirectURIs [parse ::: missing protocol scheme, system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=]`, + expectedKubeActions: []clientgotesting.Action{clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default")}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "sa no tokens", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere"}, + }, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `system:serviceaccount:ns-01:default has no tokens`, + expectedEventMsg: `Warning NoSAOAuthTokens system:serviceaccount:ns-01:default has no tokens`, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "good SA", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere"}, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: 
[]clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "good SA with valid, simple route redirects", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", "route.openshift.io"), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "example1.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere", "https://example1.com/defaultpath"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(routesResource, "ns-01", "route1"), + }, + }, + { + name: "good SA with invalid route redirects", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", "wronggroup"), + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString("wrongkind", "route1", "route.openshift.io"), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: 
"example1.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example2.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example3.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "good SA with a route that doesn't have a host", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", ""), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(routesResource, "ns-01", "route1"), + }, + }, + { + name: "good SA with routes that don't have hosts, some of which are empty or duplicates", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", "route.openshift.io"), + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString(routeKind, "route2", ""), + OAuthRedirectModelAnnotationReferencePrefix + "3": buildRedirectObjectReferenceString(routeKind, "missingroute", ""), + }, + }, + }, + &corev1.Secret{ 
+ ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "", Conditions: buildValidRouteIngressCondition()}, + {Host: "a.com", Conditions: buildValidRouteIngressCondition()}, + {Host: ""}, + {Host: "a.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "b.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route2", + UID: types.UID("route2"), + }, + Spec: routev1.RouteSpec{ + Path: "/path2", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "a.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "", Conditions: buildValidRouteIngressCondition()}, + {Host: "b.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "b.com"}, + {Host: ""}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere", "https://a.com/defaultpath", "https://a.com/path2", "https://b.com/defaultpath", "https://b.com/path2"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewListAction(routesResource, routeClientKind, "ns-01", metav1.ListOptions{}), + }, + }, + { + name: "host overrides route data", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", ""), + OAuthRedirectModelAnnotationURIPrefix + "1": "//redhat.com", + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString(routeKind, "route2", "route.openshift.io"), + OAuthRedirectModelAnnotationURIPrefix + "2": "//google.com", + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ 
+ Ingress: []routev1.RouteIngress{ + {Host: ""}, + }, + }, + }, + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route2", + UID: types.UID("route2"), + }, + Spec: routev1.RouteSpec{ + Path: "/otherpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "ignored.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "alsoignored.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"https://google.com/otherpath", "https://redhat.com/defaultpath"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewListAction(routesResource, routeClientKind, "ns-01", metav1.ListOptions{}), + }, + }, + { + name: "good SA with valid, route redirects using the same route twice", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "1": "/awesomepath", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", ""), + OAuthRedirectModelAnnotationURIPrefix + "2": "//:8000", + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString(routeKind, "route1", "route.openshift.io"), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "woot.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"https://woot.com/awesomepath", "https://woot.com:8000"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(routesResource, "ns-01", "route1"), + }, + }, + } + + for _, tc := range testCases { + delegate := &fakeDelegate{} + fakerecorder := record.NewFakeRecorder(100) + getter := saOAuthClientAdapter{ + saClient: 
tc.kubeClient.CoreV1(), + secretClient: tc.kubeClient.CoreV1(), + eventRecorder: fakerecorder, + routeClient: tc.routeClient.RouteV1(), + delegate: delegate, + grantMethod: oauthv1.GrantHandlerPrompt, + decoder: codecFactory.UniversalDecoder(), + } + client, err := getter.Get(tc.clientName, metav1.GetOptions{}) + switch { + case len(tc.expectedErr) == 0 && err == nil: + case len(tc.expectedErr) == 0 && err != nil, + len(tc.expectedErr) > 0 && err == nil, + len(tc.expectedErr) > 0 && err != nil && !strings.Contains(err.Error(), tc.expectedErr): + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedErr, err) + continue + } + + if tc.expectedDelegation != delegate.called { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedDelegation, delegate.called) + continue + } + + if !equality.Semantic.DeepEqual(tc.expectedClient, client) { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedClient, client) + continue + } + + if !reflect.DeepEqual(tc.expectedKubeActions, tc.kubeClient.Actions()) { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedKubeActions, tc.kubeClient.Actions()) + continue + } + + if !reflect.DeepEqual(tc.expectedOSActions, tc.routeClient.Actions()) { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedOSActions, tc.routeClient.Actions()) + continue + } + + if len(tc.expectedEventMsg) > 0 { + var ev string + select { + case ev = <-fakerecorder.Events: + default: + } + if tc.expectedEventMsg != ev { + t.Errorf("%s: expected event message %#v, got %#v", tc.name, tc.expectedEventMsg, ev) + } + } + } +} + +type fakeDelegate struct { + called bool +} + +func (d *fakeDelegate) Get(name string, options metav1.GetOptions) (*oauthv1.OAuthClient, error) { + d.called = true + return nil, nil +} + +func TestRedirectURIString(t *testing.T) { + for _, test := range []struct { + name string + uri redirectURI + expected string + }{ + { + name: "host with no port", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "", + path: "/test1", + }, + expected: "http://example1.com/test1", + }, + { + name: "host with port", + uri: redirectURI{ + scheme: "https", + host: "example2.com", + port: "8000", + path: "/test2", + }, + expected: "https://example2.com:8000/test2", + }, + } { + if test.expected != test.uri.String() { + t.Errorf("%s: expected %s, got %s", test.name, test.expected, test.uri.String()) + } + } +} + +func TestMerge(t *testing.T) { + for _, test := range []struct { + name string + uri redirectURI + m model + expected redirectURI + }{ + { + name: "empty model", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + m: model{ + scheme: "", + port: "", + path: "", + }, + expected: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + }, + { + name: "full model", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + m: model{ + scheme: "https", + port: "8000", + path: "/ello", + }, + expected: redirectURI{ + scheme: "https", + host: "example1.com", + port: "8000", + path: "/ello", + }, + }, + { + name: "only path", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + m: model{ + scheme: "", + port: "", + path: "/newpath", + }, + expected: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/newpath", + }, + }, + } { + test.uri.merge(&test.m) + if test.expected != test.uri { + t.Errorf("%s: expected %#v, got %#v", 
test.name, test.expected, test.uri) + } + } +} + +func TestParseModelsMap(t *testing.T) { + for _, test := range []struct { + name string + annotations map[string]string + expected map[string]model + }{ + { + name: "empty annotations", + annotations: map[string]string{}, + expected: map[string]model{}, + }, + { + name: "no model annotations", + annotations: map[string]string{"one": "anywhere"}, + expected: map[string]model{}, + }, + { + name: "static URI annotations", + annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "//google.com", + OAuthRedirectModelAnnotationURIPrefix + "two": "justapath", + OAuthRedirectModelAnnotationURIPrefix + "three": "http://redhat.com", + OAuthRedirectModelAnnotationURIPrefix + "four": "http://hello:90/world", + OAuthRedirectModelAnnotationURIPrefix + "five": "scheme0://host0:5000/path0", + OAuthRedirectModelAnnotationReferencePrefix + "five": buildRedirectObjectReferenceString("kind0", "name0", "group0"), + }, + expected: map[string]model{ + "one": { + scheme: "", + port: "", + path: "", + group: "", + kind: "", + name: "", + host: "google.com", + }, + "two": { + scheme: "", + port: "", + path: "justapath", + group: "", + kind: "", + name: "", + }, + "three": { + scheme: "http", + port: "", + path: "", + group: "", + kind: "", + name: "", + host: "redhat.com", + }, + "four": { + scheme: "http", + port: "90", + path: "/world", + group: "", + kind: "", + name: "", + host: "hello", + }, + "five": { + scheme: "scheme0", + port: "5000", + path: "/path0", + group: "group0", + kind: "kind0", + name: "name0", + host: "host0", + }, + }, + }, + { + name: "simple model", + annotations: map[string]string{ + OAuthRedirectModelAnnotationReferencePrefix + "one": buildRedirectObjectReferenceString(routeKind, "route1", ""), + }, + expected: map[string]model{ + "one": { + scheme: "", + port: "", + path: "", + group: "", + kind: routeKind, + name: "route1", + }, + }, + }, + { + name: "multiple full models", + annotations: map[string]string{ + OAuthRedirectModelAnnotationReferencePrefix + "one": buildRedirectObjectReferenceString(routeKind, "route1", ""), + OAuthRedirectModelAnnotationURIPrefix + "one": "https://:8000/path1", + + OAuthRedirectModelAnnotationReferencePrefix + "two": buildRedirectObjectReferenceString(routeKind, "route2", "route.openshift.io"), + OAuthRedirectModelAnnotationURIPrefix + "two": "http://:9000/path2", + }, + expected: map[string]model{ + "one": { + scheme: "https", + port: "8000", + path: "/path1", + group: "", + kind: routeKind, + name: "route1", + }, + "two": { + scheme: "http", + port: "9000", + path: "/path2", + group: "route.openshift.io", + kind: routeKind, + name: "route2", + }, + }, + }, + } { + models, errs := parseModelsMap(test.annotations, decoder) + if len(errs) > 0 { + t.Errorf("%s: unexpected parseModelsMap errors %v", test.name, errs) + } + if !reflect.DeepEqual(test.expected, models) { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, models) + } + } +} + +func TestGetRedirectURIs(t *testing.T) { + for _, test := range []struct { + name string + namespace string + models modelList + routes []*routev1.Route + expected redirectURIList + }{ + { + name: "single ingress routes", + namespace: "ns01", + models: modelList{ + { + scheme: "https", + port: "8000", + path: "/path1", + group: "", + kind: routeKind, + name: "route1", + }, + { + scheme: "http", + port: "9000", + path: "", + group: "", + kind: routeKind, + name: "route2", + }, + }, + routes: []*routev1.Route{ + { + ObjectMeta: 
metav1.ObjectMeta{ + Name: "route1", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathA", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "exampleA.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route2", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathB", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "exampleB.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: redirectURIList{ + { + scheme: "https", + host: "exampleA.com", + port: "8000", + path: "/path1", + }, + { + scheme: "http", + host: "exampleB.com", + port: "9000", + path: "/pathB", + }, + }, + }, + { + name: "multiple ingress routes", + namespace: "ns01", + models: modelList{ + { + scheme: "https", + port: "8000", + path: "/path1", + group: "", + kind: routeKind, + name: "route1", + }, + { + scheme: "http", + port: "9000", + path: "", + group: "", + kind: routeKind, + name: "route2", + }, + { + scheme: "http", + port: "", + path: "/secondroute2path", + group: "", + kind: routeKind, + name: "route2", + }, + }, + routes: []*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route1", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathA", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "A.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "B.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "C.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route2", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathB", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "0.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "1.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: redirectURIList{ + { + scheme: "https", + host: "A.com", + port: "8000", + path: "/path1", + }, + { + scheme: "https", + host: "B.com", + port: "8000", + path: "/path1", + }, + { + scheme: "https", + host: "C.com", + port: "8000", + path: "/path1", + }, + { + scheme: "http", + host: "0.com", + port: "9000", + path: "/pathB", + }, + { + scheme: "http", + host: "1.com", + port: "9000", + path: "/pathB", + }, + { + scheme: "http", + host: "0.com", + port: "", + path: "/secondroute2path", + }, + { + scheme: "http", + host: "1.com", + port: "", + path: "/secondroute2path", + }, + }, + }, + } { + a := buildRouteClient(test.routes) + uris, errs := a.redirectURIsFromRoutes(test.namespace, test.models.getNames()) + if len(errs) > 0 { + t.Errorf("%s: unexpected redirectURIsFromRoutes errors %v", test.name, errs) + } + actual := test.models.getRedirectURIs(uris) + if !reflect.DeepEqual(test.expected, actual) { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, actual) + } + } +} + +func TestRedirectURIsFromRoutes(t *testing.T) { + for _, test := range []struct { + name string + namespace string + names sets.String + routes []*routev1.Route + expected map[string]redirectURIList + }{ + { + name: "single route with single ingress", + namespace: "ns01", + names: sets.NewString("routeA"), + routes: []*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "routeA", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathA", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "exampleA.com", 
Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: map[string]redirectURIList{ + "routeA": { + { + scheme: "http", + host: "exampleA.com", + port: "", + path: "/pathA", + }, + }, + }, + }, + { + name: "multiple routes with multiple ingresses", + namespace: "ns01", + names: sets.NewString("route0", "route1", "route2"), + routes: []*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route0", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/path0", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "example0A.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example0B.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example0C.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route1", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/path1", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "redhat.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "coreos.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "github.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route2", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/path2", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "google.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "yahoo.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "bing.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: map[string]redirectURIList{ + "route0": { + { + scheme: "http", + host: "example0A.com", + port: "", + path: "/path0", + }, + { + scheme: "http", + host: "example0B.com", + port: "", + path: "/path0", + }, + { + scheme: "http", + host: "example0C.com", + port: "", + path: "/path0", + }, + }, + "route1": { + { + scheme: "https", + host: "redhat.com", + port: "", + path: "/path1", + }, + { + scheme: "https", + host: "coreos.com", + port: "", + path: "/path1", + }, + { + scheme: "https", + host: "github.com", + port: "", + path: "/path1", + }, + }, + "route2": { + { + scheme: "https", + host: "google.com", + port: "", + path: "/path2", + }, + { + scheme: "https", + host: "yahoo.com", + port: "", + path: "/path2", + }, + { + scheme: "https", + host: "bing.com", + port: "", + path: "/path2", + }, + }, + }, + }, + } { + a := buildRouteClient(test.routes) + uris, errs := a.redirectURIsFromRoutes(test.namespace, test.names) + if len(errs) > 0 { + t.Errorf("%s: unexpected redirectURIsFromRoutes errors %v", test.name, errs) + } + if !reflect.DeepEqual(test.expected, uris) { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, uris) + } + } +} + +func buildRouteClient(routes []*routev1.Route) saOAuthClientAdapter { + objects := []runtime.Object{} + for _, route := range routes { + objects = append(objects, route) + } + return saOAuthClientAdapter{ + routeClient: routev1fake.NewSimpleClientset(objects...).RouteV1(), + eventRecorder: record.NewFakeRecorder(100), + } +} + +func buildRedirectObjectReferenceString(kind, name, group string) string { + ref := &oauthv1.OAuthRedirectReference{ + Reference: oauthv1.RedirectReference{ + Kind: kind, + Name: name, + Group: group, + }, + } + data, err := runtime.Encode(encoder, ref) + if err != nil { + panic(err) + } + return string(data) +} + +func 
buildValidRouteIngressCondition() []routev1.RouteIngressCondition { + return []routev1.RouteIngressCondition{{Type: routev1.RouteAdmitted, Status: corev1.ConditionTrue}} +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go new file mode 100644 index 00000000000..9a773c2d4c2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle.go @@ -0,0 +1,120 @@ +package certrotation + +import ( + "crypto/x509" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/cert" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/certs" + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" +) + +// CABundleRotation maintains a CA bundle config map by adding new CA certs and removing expired old ones. +type CABundleRotation struct { + Namespace string + Name string + + Informer corev1informers.ConfigMapInformer + Lister corev1listers.ConfigMapLister + Client corev1client.ConfigMapsGetter + EventRecorder events.Recorder +} + +func (c CABundleRotation) ensureConfigMapCABundle(signingCertKeyPair *crypto.CA) ([]*x509.Certificate, error) { + // by this point we have the current signing cert/key pair. We now need to make sure that the ca-bundle configmap has this cert and + // doesn't have any expired certs + originalCABundleConfigMap, err := c.Lister.ConfigMaps(c.Namespace).Get(c.Name) + if err != nil && !apierrors.IsNotFound(err) { + return nil, err + } + caBundleConfigMap := originalCABundleConfigMap.DeepCopy() + if apierrors.IsNotFound(err) { + // create an empty one + caBundleConfigMap = &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}} + } + updatedCerts, err := manageCABundleConfigMap(caBundleConfigMap, signingCertKeyPair.Config.Certs[0]) + if err != nil { + return nil, err + } + if originalCABundleConfigMap == nil || originalCABundleConfigMap.Data == nil || !equality.Semantic.DeepEqual(originalCABundleConfigMap.Data, caBundleConfigMap.Data) { + c.EventRecorder.Eventf("CABundleUpdateRequired", "%q in %q requires a new cert", c.Name, c.Namespace) + LabelAsManagedConfigMap(caBundleConfigMap, CertificateTypeCABundle) + + actualCABundleConfigMap, modified, err := resourceapply.ApplyConfigMap(c.Client, c.EventRecorder, caBundleConfigMap) + if err != nil { + return nil, err + } + if modified { + klog.V(2).Infof("Updated ca-bundle.crt configmap %s/%s with:\n%s", caBundleConfigMap.Namespace, caBundleConfigMap.Name, certs.CertificateBundleToString(updatedCerts)) + } + + caBundleConfigMap = actualCABundleConfigMap + } + + caBundle := caBundleConfigMap.Data["ca-bundle.crt"] + if len(caBundle) == 0 { + return nil, fmt.Errorf("configmap/%s -n%s missing ca-bundle.crt", caBundleConfigMap.Name, caBundleConfigMap.Namespace) + } + certificates, err := cert.ParseCertsPEM([]byte(caBundle)) + if err != nil { + return nil, err + } + + return certificates, nil +} + +// manageCABundleConfigMap adds the new certificate to the CA bundle, eliminates duplicates, and prunes the list of expired +// 
certs to trust as signers +func manageCABundleConfigMap(caBundleConfigMap *corev1.ConfigMap, currentSigner *x509.Certificate) ([]*x509.Certificate, error) { + if caBundleConfigMap.Data == nil { + caBundleConfigMap.Data = map[string]string{} + } + + certificates := []*x509.Certificate{} + caBundle := caBundleConfigMap.Data["ca-bundle.crt"] + if len(caBundle) > 0 { + var err error + certificates, err = cert.ParseCertsPEM([]byte(caBundle)) + if err != nil { + return nil, err + } + } + certificates = append([]*x509.Certificate{currentSigner}, certificates...) + certificates = crypto.FilterExpiredCerts(certificates...) + + finalCertificates := []*x509.Certificate{} + // now check for duplicates. n^2, but super simple + for i := range certificates { + found := false + for j := range finalCertificates { + if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { + found = true + break + } + } + if !found { + finalCertificates = append(finalCertificates, certificates[i]) + } + } + + caBytes, err := crypto.EncodeCertificates(finalCertificates...) + if err != nil { + return nil, err + } + + caBundleConfigMap.Data["ca-bundle.crt"] = string(caBytes) + + return finalCertificates, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go new file mode 100644 index 00000000000..d08a58a7b10 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/cabundle_test.go @@ -0,0 +1,301 @@ +package certrotation + +import ( + gcrypto "crypto" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "errors" + "io/ioutil" + "math/big" + "strings" + "testing" + "time" + + "k8s.io/client-go/util/cert" + + "github.com/davecgh/go-spew/spew" + + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubefake "k8s.io/client-go/kubernetes/fake" + corev1listers "k8s.io/client-go/listers/core/v1" + clienttesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" +) + +func TestEnsureConfigMapCABundle(t *testing.T) { + tests := []struct { + name string + + initialConfigMapFn func() *corev1.ConfigMap + caFn func() (*crypto.CA, error) + + verifyActions func(t *testing.T, client *kubefake.Clientset) + expectedError string + }{ + { + name: "initial create", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialConfigMapFn: func() *corev1.ConfigMap { return nil }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[0].Matches("get", "configmaps") { + t.Error(actions[0]) + } + if !actions[1].Matches("create", "configmaps") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.ConfigMap) + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } + if len(actual.Data["ca-bundle.crt"]) == 0 { + t.Error(actual.Data) + } + }, + }, + { + name: "update keep both", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + 
}, + initialConfigMapFn: func() *corev1.ConfigMap { + caBundleConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "trust-bundle"}, + Data: map[string]string{}, + } + certs, err := newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + if err != nil { + t.Fatal(err) + } + caBytes, err := crypto.EncodeCertificates(certs.Config.Certs...) + if err != nil { + t.Fatal(err) + } + caBundleConfigMap.Data["ca-bundle.crt"] = string(caBytes) + return caBundleConfigMap + }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[1].Matches("update", "configmaps") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap) + if len(actual.Data["ca-bundle.crt"]) == 0 { + t.Error(actual.Data) + } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } + result, err := cert.ParseCertsPEM([]byte(actual.Data["ca-bundle.crt"])) + if err != nil { + t.Fatal(err) + } + if len(result) != 2 { + t.Error(len(result)) + } + }, + }, + { + name: "update remove old", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialConfigMapFn: func() *corev1.ConfigMap { + caBundleConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "trust-bundle"}, + Data: map[string]string{}, + } + certs, err := newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + if err != nil { + t.Fatal(err) + } + caBytes, err := crypto.EncodeCertificates(certs.Config.Certs[0], certs.Config.Certs[0]) + if err != nil { + t.Fatal(err) + } + caBundleConfigMap.Data["ca-bundle.crt"] = string(caBytes) + return caBundleConfigMap + }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[1].Matches("update", "configmaps") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap) + if len(actual.Data["ca-bundle.crt"]) == 0 { + t.Error(actual.Data) + } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle { + t.Errorf("expected certificate type 'ca-bundle', got: %v", certType) + } + result, err := cert.ParseCertsPEM([]byte(actual.Data["ca-bundle.crt"])) + if err != nil { + t.Fatal(err) + } + if len(result) != 2 { + t.Error(len(result)) + } + }, + }, + { + name: "update remove duplicate", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialConfigMapFn: func() *corev1.ConfigMap { + caBundleConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "trust-bundle"}, + Data: map[string]string{}, + } + certBytes, err := ioutil.ReadFile("./testfiles/tls-expired.crt") + if err != nil { + t.Fatal(err) + } + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + t.Fatal(err) + } + caBytes, err := crypto.EncodeCertificates(certs...) 
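// The seed bundle built here contains only an expired certificate; the
// rotation under test should prune it, leaving just the fresh signer
// (verifyActions below expects exactly one parsed certificate).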
+			if err != nil {
+				t.Fatal(err)
+			}
+			caBundleConfigMap.Data["ca-bundle.crt"] = string(caBytes)
+			return caBundleConfigMap
+		},
+		verifyActions: func(t *testing.T, client *kubefake.Clientset) {
+			actions := client.Actions()
+			if len(actions) != 2 {
+				t.Fatal(spew.Sdump(actions))
+			}
+
+			if !actions[1].Matches("update", "configmaps") {
+				t.Error(actions[1])
+			}
+
+			actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap)
+			if len(actual.Data["ca-bundle.crt"]) == 0 {
+				t.Error(actual.Data)
+			}
+			if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeCABundle {
+				t.Errorf("expected certificate type 'ca-bundle', got: %v", certType)
+			}
+			result, err := cert.ParseCertsPEM([]byte(actual.Data["ca-bundle.crt"]))
+			if err != nil {
+				t.Fatal(err)
+			}
+			if len(result) != 1 {
+				t.Error(len(result))
+			}
+		},
+	},
+}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+
+			client := kubefake.NewSimpleClientset()
+			if startingObj := test.initialConfigMapFn(); startingObj != nil {
+				indexer.Add(startingObj)
+				client = kubefake.NewSimpleClientset(startingObj)
+			}
+
+			c := &CABundleRotation{
+				Namespace: "ns",
+				Name:      "trust-bundle",
+
+				Client:        client.CoreV1(),
+				Lister:        corev1listers.NewConfigMapLister(indexer),
+				EventRecorder: events.NewInMemoryRecorder("test"),
+			}
+
+			newCA, err := test.caFn()
+			if err != nil {
+				t.Fatal(err)
+			}
+			_, err = c.ensureConfigMapCABundle(newCA)
+			switch {
+			case err != nil && len(test.expectedError) == 0:
+				t.Error(err)
+			case err != nil && !strings.Contains(err.Error(), test.expectedError):
+				t.Error(err)
+			case err == nil && len(test.expectedError) != 0:
+				t.Errorf("missing %q", test.expectedError)
+			}
+
+			test.verifyActions(t, client)
+		})
+	}
+}
+
+// newTestCACertificate generates and signs a new CA certificate and key.
+func newTestCACertificate(subject pkix.Name, serialNumber int64, validity metav1.Duration, currentTime func() time.Time) (*crypto.CA, error) {
+	caPublicKey, caPrivateKey, err := crypto.NewKeyPair()
+	if err != nil {
+		return nil, err
+	}
+
+	caCert := &x509.Certificate{
+		Subject: subject,
+
+		SignatureAlgorithm: x509.SHA256WithRSA,
+
+		NotBefore:    currentTime().Add(-1 * time.Second),
+		NotAfter:     currentTime().Add(validity.Duration),
+		SerialNumber: big.NewInt(serialNumber),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	cert, err := signCertificate(caCert, caPublicKey, caCert, caPrivateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return &crypto.CA{
+		Config: &crypto.TLSCertificateConfig{
+			Certs: []*x509.Certificate{cert},
+			Key:   caPrivateKey,
+		},
+		SerialGenerator: &crypto.RandomSerialGenerator{},
+	}, nil
+}
+
+func signCertificate(template *x509.Certificate, requestKey gcrypto.PublicKey, issuer *x509.Certificate, issuerKey gcrypto.PrivateKey) (*x509.Certificate, error) {
+	derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey)
+	if err != nil {
+		return nil, err
+	}
+	certs, err := x509.ParseCertificates(derBytes)
+	if err != nil {
+		return nil, err
+	}
+	if len(certs) != 1 {
+		return nil, errors.New("expected a single certificate")
+	}
+	return certs[0], nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go
new file mode 100644
index 00000000000..1d1a761bb96
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go
@@ -0,0 +1,213 @@
+package certrotation
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+const (
+	// CertificateNotBeforeAnnotation contains the certificate creation (not-before) date in RFC3339 format.
+	CertificateNotBeforeAnnotation = "auth.openshift.io/certificate-not-before"
+	// CertificateNotAfterAnnotation contains the certificate expiration date in RFC3339 format.
+	CertificateNotAfterAnnotation = "auth.openshift.io/certificate-not-after"
+	// CertificateIssuer contains the common name of the certificate that signed another certificate.
+	CertificateIssuer = "auth.openshift.io/certificate-issuer"
+	// CertificateHostnames contains the hostnames used by a signer.
+	CertificateHostnames = "auth.openshift.io/certificate-hostnames"
+)
+
+const workQueueKey = "key"
+
+// CertRotationController does:
+//
+// 1) continuously create a self-signed signing CA (via SigningRotation).
+//    It creates the next one when a given percentage of the validity of the old CA has passed.
+// 2) maintain a CA bundle with all not yet expired CA certs.
+// 3) continuously create a target cert and key signed by the latest signing CA.
+//    It creates the next one when a given percentage of the validity of the previous cert has
+//    passed, or when a new CA has been created.
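For orientation, a minimal sketch of how the three rotation pieces compose with the constructor defined just below, assuming an informer factory, kube client, operator client, and recorder from the surrounding operator; the helper name, namespace, object names, and durations are hypothetical:

```go
package example

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"

	"github.com/openshift/library-go/pkg/operator/certrotation"
	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/v1helpers"
)

// newExampleRotationController is a hypothetical wiring example: one signer secret,
// one CA-bundle config map, and one serving-cert secret rotated as a unit.
func newExampleRotationController(
	kubeClient kubernetes.Interface,
	kubeInformers informers.SharedInformerFactory,
	operatorClient v1helpers.StaticPodOperatorClient,
	recorder events.Recorder,
) (*certrotation.CertRotationController, error) {
	return certrotation.NewCertRotationController(
		"ExampleRotation",
		certrotation.SigningRotation{
			Namespace:     "openshift-example",
			Name:          "example-signer",
			Validity:      60 * 24 * time.Hour, // signer lives ~60 days
			Refresh:       30 * 24 * time.Hour, // rotate it at the half-way point
			Informer:      kubeInformers.Core().V1().Secrets(),
			Lister:        kubeInformers.Core().V1().Secrets().Lister(),
			Client:        kubeClient.CoreV1(),
			EventRecorder: recorder,
		},
		certrotation.CABundleRotation{
			Namespace:     "openshift-example",
			Name:          "example-ca-bundle",
			Informer:      kubeInformers.Core().V1().ConfigMaps(),
			Lister:        kubeInformers.Core().V1().ConfigMaps().Lister(),
			Client:        kubeClient.CoreV1(),
			EventRecorder: recorder,
		},
		certrotation.TargetRotation{
			Namespace: "openshift-example",
			Name:      "example-serving-cert",
			Validity:  30 * 24 * time.Hour,
			Refresh:   15 * 24 * time.Hour,
			CertCreator: &certrotation.ServingRotation{
				Hostnames: func() []string { return []string{"example.openshift-example.svc"} },
			},
			Informer:      kubeInformers.Core().V1().Secrets(),
			Lister:        kubeInformers.Core().V1().Secrets().Lister(),
			Client:        kubeClient.CoreV1(),
			EventRecorder: recorder,
		},
		operatorClient,
	)
}
```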
+type CertRotationController struct { + name string + + SigningRotation SigningRotation + CABundleRotation CABundleRotation + TargetRotation TargetRotation + OperatorClient v1helpers.StaticPodOperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface +} + +func NewCertRotationController( + name string, + signingRotation SigningRotation, + caBundleRotation CABundleRotation, + targetRotation TargetRotation, + operatorClient v1helpers.StaticPodOperatorClient, +) (*CertRotationController, error) { + c := &CertRotationController{ + name: name, + + SigningRotation: signingRotation, + CABundleRotation: caBundleRotation, + TargetRotation: targetRotation, + OperatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), strings.Replace(name, "-", "_", -1)), + } + + signingRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + caBundleRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + targetRotation.Informer.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, signingRotation.Informer.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, caBundleRotation.Informer.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, targetRotation.Informer.Informer().HasSynced) + + return c, nil +} + +func (c CertRotationController) sync() error { + syncErr := c.syncWorker() + + newCondition := operatorv1.OperatorCondition{ + Type: fmt.Sprintf(condition.CertRotationDegradedConditionTypeFmt, c.name), + Status: operatorv1.ConditionFalse, + } + if syncErr != nil { + newCondition.Status = operatorv1.ConditionTrue + newCondition.Reason = "RotationError" + newCondition.Message = syncErr.Error() + } + if _, _, updateErr := v1helpers.UpdateStaticPodStatus(c.OperatorClient, v1helpers.UpdateStaticPodConditionFn(newCondition)); updateErr != nil { + return updateErr + } + + return syncErr +} + +func (c CertRotationController) syncWorker() error { + signingCertKeyPair, err := c.SigningRotation.ensureSigningCertKeyPair() + if err != nil { + return err + } + + cabundleCerts, err := c.CABundleRotation.ensureConfigMapCABundle(signingCertKeyPair) + if err != nil { + return err + } + + if err := c.TargetRotation.ensureTargetCertKeyPair(signingCertKeyPair, cabundleCerts); err != nil { + return err + } + + return nil +} + +func (c *CertRotationController) WaitForReady(stopCh <-chan struct{}) { + klog.Infof("Waiting for CertRotationController - %q", c.name) + defer klog.Infof("Finished waiting for CertRotationController - %q", c.name) + + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } +} + +// RunOnce will run the cert rotation logic, but will not try to update the static pod status. +// This eliminates the need to pass an OperatorClient and avoids dubious writes and status. +func (c *CertRotationController) RunOnce() error { + return c.syncWorker() +} + +func (c *CertRotationController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting CertRotationController - %q", c.name) + defer klog.Infof("Shutting down CertRotationController - %q", c.name) + c.WaitForReady(ctx.Done()) + + // doesn't matter what workers say, only start one. 
+	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
+
+	// start a time-based thread to ensure we stay up to date
+	go wait.Until(func() {
+		ticker := time.NewTicker(time.Minute)
+		defer ticker.Stop()
+
+		for {
+			c.queue.Add(workQueueKey)
+			select {
+			case <-ticker.C:
+			case <-ctx.Done():
+				return
+			}
+		}
+
+	}, time.Minute, ctx.Done())
+
+	// if we have a need to force rechecking the cert, use this channel to do it.
+	if refresher, ok := c.TargetRotation.CertCreator.(TargetCertRechecker); ok {
+		targetRefresh := refresher.RecheckChannel()
+		go wait.Until(func() {
+			for {
+				select {
+				case <-targetRefresh:
+					c.queue.Add(workQueueKey)
+				case <-ctx.Done():
+					return
+				}
+			}
+
+		}, time.Minute, ctx.Done())
+	}
+
+	<-ctx.Done()
+}
+
+func (c *CertRotationController) runWorker(_ context.Context) {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *CertRotationController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v: %v failed with: %v", c.name, dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// eventHandler queues the operator to check spec and status
+func (c *CertRotationController) eventHandler() cache.ResourceEventHandler {
+	return cache.ResourceEventHandlerFuncs{
+		AddFunc:    func(obj interface{}) { c.queue.Add(workQueueKey) },
+		UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) },
+		DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) },
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go
new file mode 100644
index 00000000000..5f88d64063f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/config.go
@@ -0,0 +1,40 @@
+package certrotation
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+)
+
+// GetCertRotationScale returns the scale for rotation durations. The normal scale is based on a day;
+// the value returned by this function is used in place of a day to scale rotation durations, so you can set it shorter.
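The scale is consumed by callers rather than by the rotation structs themselves. A minimal sketch of one plausible pattern, assuming a `kubeClient` and `operandNamespace` from the caller's context (both hypothetical here), is to substitute the returned duration for the one-day base unit:

```go
// rotationDay is the base unit for the Validity/Refresh durations configured on the
// rotation structs; the unsupported config map can shrink it, e.g. for short-lived test clusters.
rotationDay := 24 * time.Hour
scale, err := certrotation.GetCertRotationScale(kubeClient, operandNamespace)
if err != nil {
	return err
}
if scale > 0 {
	rotationDay = scale
}
signerValidity := 60 * rotationDay // "60 days" at the configured scale
```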
+func GetCertRotationScale(client kubernetes.Interface, namespace string) (time.Duration, error) {
+	certRotationScale := time.Duration(0)
+	err := wait.PollImmediate(time.Second, 1*time.Minute, func() (bool, error) {
+		certRotationConfig, err := client.CoreV1().ConfigMaps(namespace).Get("unsupported-cert-rotation-config", metav1.GetOptions{})
+		if err != nil {
+			if errors.IsNotFound(err) {
+				return true, nil
+			}
+			return false, err
+		}
+		if value, ok := certRotationConfig.Data["base"]; ok {
+			certRotationScale, err = time.ParseDuration(value)
+			if err != nil {
+				return false, err
+			}
+		}
+		return true, nil
+	})
+	if err != nil {
+		return 0, err
+	}
+	if certRotationScale > 24*time.Hour {
+		return 0, fmt.Errorf("scale longer than 24h is not allowed: %v", certRotationScale)
+	}
+	return certRotationScale, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go
new file mode 100644
index 00000000000..9c0df4ce545
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/label.go
@@ -0,0 +1,61 @@
+package certrotation
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+const (
+	// ManagedCertificateTypeLabelName marks a config map or secret as an object that contains managed certificates.
+	// This groups all objects that store certs and allows an easy query to get them all.
+	// The value of this label is the CertificateType of the stored certificates.
+	ManagedCertificateTypeLabelName = "auth.openshift.io/managed-certificate-type"
+)
+
+type CertificateType string
+
+var (
+	CertificateTypeCABundle CertificateType = "ca-bundle"
+	CertificateTypeSigner   CertificateType = "signer"
+	CertificateTypeTarget   CertificateType = "target"
+	CertificateTypeUnknown  CertificateType = "unknown"
+)
+
+// LabelAsManagedConfigMap adds a label indicating the given config map contains certificates
+// that are managed.
+func LabelAsManagedConfigMap(config *v1.ConfigMap, certificateType CertificateType) {
+	if config.Labels == nil {
+		config.Labels = map[string]string{}
+	}
+	config.Labels[ManagedCertificateTypeLabelName] = string(certificateType)
+}
+
+// LabelAsManagedSecret adds a label indicating the given secret contains certificates
+// that are managed.
+func LabelAsManagedSecret(secret *v1.Secret, certificateType CertificateType) {
+	if secret.Labels == nil {
+		secret.Labels = map[string]string{}
+	}
+	secret.Labels[ManagedCertificateTypeLabelName] = string(certificateType)
+}
+
+// CertificateTypeFromObject returns the CertificateType based on the labels of the object.
+func CertificateTypeFromObject(obj runtime.Object) (CertificateType, error) {
+	accessor, err := meta.Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	actualLabels := accessor.GetLabels()
+	if actualLabels == nil {
+		return CertificateTypeUnknown, nil
+	}
+
+	t := CertificateType(actualLabels[ManagedCertificateTypeLabelName])
+	switch t {
+	case CertificateTypeCABundle, CertificateTypeSigner, CertificateTypeTarget:
+		return t, nil
+	default:
+		return CertificateTypeUnknown, nil
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go
new file mode 100644
index 00000000000..94f82d18777
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer.go
@@ -0,0 +1,145 @@
+package certrotation
+
+import (
+	"bytes"
+	"fmt"
+	"time"
+
+	"github.com/openshift/library-go/pkg/crypto"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+)
+
+// SigningRotation rotates a self-signed signing CA stored in a secret. It creates a new one when
+// a given percentage of the lifetime of the old CA has passed.
+type SigningRotation struct {
+	Namespace              string
+	Name                   string
+	Validity               time.Duration
+	Refresh                time.Duration
+	RefreshOnlyWhenExpired bool
+
+	Informer      corev1informers.SecretInformer
+	Lister        corev1listers.SecretLister
+	Client        corev1client.SecretsGetter
+	EventRecorder events.Recorder
+}
+
+func (c SigningRotation) ensureSigningCertKeyPair() (*crypto.CA, error) {
+	originalSigningCertKeyPairSecret, err := c.Lister.Secrets(c.Namespace).Get(c.Name)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, err
+	}
+	signingCertKeyPairSecret := originalSigningCertKeyPairSecret.DeepCopy()
+	if apierrors.IsNotFound(err) {
+		// create an empty one
+		signingCertKeyPairSecret = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}}
+	}
+	signingCertKeyPairSecret.Type = corev1.SecretTypeTLS
+
+	if reason := needNewSigningCertKeyPair(signingCertKeyPairSecret.Annotations, c.Refresh, c.RefreshOnlyWhenExpired); len(reason) > 0 {
+		c.EventRecorder.Eventf("SignerUpdateRequired", "%q in %q requires a new signing cert/key pair: %v", c.Name, c.Namespace, reason)
+		if err := setSigningCertKeyPairSecret(signingCertKeyPairSecret, c.Validity); err != nil {
+			return nil, err
+		}
+
+		LabelAsManagedSecret(signingCertKeyPairSecret, CertificateTypeSigner)
+
+		actualSigningCertKeyPairSecret, _, err := resourceapply.ApplySecret(c.Client, c.EventRecorder, signingCertKeyPairSecret)
+		if err != nil {
+			return nil, err
+		}
+		signingCertKeyPairSecret = actualSigningCertKeyPairSecret
+	}
+	// at this point, the secret has the correct signer, so we should read that signer to be able to sign
+	signingCertKeyPair, err := crypto.GetCAFromBytes(signingCertKeyPairSecret.Data["tls.crt"], signingCertKeyPairSecret.Data["tls.key"])
+	if err != nil {
+		return nil, err
+	}
+
+	return signingCertKeyPair, nil
+}
+
+func needNewSigningCertKeyPair(annotations map[string]string, refresh time.Duration, refreshOnlyWhenExpired bool) string {
+	notBefore, notAfter, reason := getValidityFromAnnotations(annotations)
+	if len(reason) > 0 {
+		return reason
+	}
+
+	if time.Now().After(notAfter) {
+		return "already expired"
+	}
+
+	if refreshOnlyWhenExpired {
+		return ""
+	}
+
+	maxWait := notAfter.Sub(notBefore) / 5
+	latestTime := notAfter.Add(-maxWait)
+	if time.Now().After(latestTime) {
+		return fmt.Sprintf("past its latest possible time %v", latestTime)
+	}
+
+	refreshTime := notBefore.Add(refresh)
+	if time.Now().After(refreshTime) {
+		return fmt.Sprintf("past its refresh time %v", refreshTime)
+	}
+
+	return ""
+}
+
+func getValidityFromAnnotations(annotations map[string]string) (notBefore time.Time, notAfter time.Time, reason string) {
+	notAfterString := annotations[CertificateNotAfterAnnotation]
+	if len(notAfterString) == 0 {
+		return notBefore, notAfter, "missing notAfter"
+	}
+	notAfter, err := time.Parse(time.RFC3339, notAfterString)
+	if err != nil {
+		return notBefore, notAfter, fmt.Sprintf("bad expiry: %q", notAfterString)
+	}
+	notBeforeString := annotations[CertificateNotBeforeAnnotation]
+	if len(notBeforeString) == 0 {
+		return notBefore, notAfter, "missing notBefore"
+	}
+	notBefore, err = time.Parse(time.RFC3339, notBeforeString)
+	if err != nil {
+		return notBefore, notAfter, fmt.Sprintf("bad notBefore: %q", notBeforeString)
+	}
+
+	return notBefore, notAfter, ""
+}
+
+// setSigningCertKeyPairSecret creates a new signing cert/key pair and sets them in the secret
+func setSigningCertKeyPairSecret(signingCertKeyPairSecret *corev1.Secret, validity time.Duration) error {
+	signerName := fmt.Sprintf("%s_%s@%d", signingCertKeyPairSecret.Namespace, signingCertKeyPairSecret.Name, time.Now().Unix())
+	ca, err := crypto.MakeSelfSignedCAConfigForDuration(signerName, validity)
+	if err != nil {
+		return err
+	}
+
+	certBytes := &bytes.Buffer{}
+	keyBytes := &bytes.Buffer{}
+	if err := ca.WriteCertConfig(certBytes, keyBytes); err != nil {
+		return err
+	}
+
+	if signingCertKeyPairSecret.Annotations == nil {
+		signingCertKeyPairSecret.Annotations = map[string]string{}
+	}
+	if signingCertKeyPairSecret.Data == nil {
+		signingCertKeyPairSecret.Data = map[string][]byte{}
+	}
+	signingCertKeyPairSecret.Data["tls.crt"] = certBytes.Bytes()
+	signingCertKeyPairSecret.Data["tls.key"] = keyBytes.Bytes()
+	signingCertKeyPairSecret.Annotations[CertificateNotAfterAnnotation] = ca.Certs[0].NotAfter.Format(time.RFC3339)
+	signingCertKeyPairSecret.Annotations[CertificateNotBeforeAnnotation] = ca.Certs[0].NotBefore.Format(time.RFC3339)
+	signingCertKeyPairSecret.Annotations[CertificateIssuer] = ca.Certs[0].Issuer.CommonName
+
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go
new file mode 100644
index 00000000000..209a0753934
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/signer_test.go
@@ -0,0 +1,132 @@
+package certrotation
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubefake "k8s.io/client-go/kubernetes/fake"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+	clienttesting "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+func TestEnsureSigningCertKeyPair(t *testing.T) {
+	tests := []struct {
+		name string
+
+		initialSecret *corev1.Secret
+
+		verifyActions func(t *testing.T, client *kubefake.Clientset)
+		expectedError string
+	}{
+		{
+			name: "initial create",
verifyActions: func(t *testing.T, client *kubefake.Clientset) { + t.Helper() + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[0].Matches("get", "secrets") { + t.Error(actions[0]) + } + if !actions[1].Matches("create", "secrets") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.Secret) + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeSigner { + t.Errorf("expected certificate type 'signer', got: %v", certType) + } + if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { + t.Error(actual.Data) + } + }, + }, + { + name: "update no annotations", + initialSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "signer"}, + }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + t.Helper() + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[1].Matches("update", "secrets") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.Secret) + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeSigner { + t.Errorf("expected certificate type 'signer', got: %v", certType) + } + if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { + t.Error(actual.Data) + } + }, + }, + { + name: "update no work", + initialSecret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "signer", + Annotations: map[string]string{ + "auth.openshift.io/certificate-not-after": "2108-09-08T22:47:31-07:00", + "auth.openshift.io/certificate-not-before": "2108-09-08T20:47:31-07:00", + }}, + }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + t.Helper() + actions := client.Actions() + if len(actions) != 0 { + t.Fatal(spew.Sdump(actions)) + } + }, + expectedError: "certFile missing", // this means we tried to read the cert from the existing secret. 
If we created one, we fail in the client check
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+
+			client := kubefake.NewSimpleClientset()
+			if test.initialSecret != nil {
+				indexer.Add(test.initialSecret)
+				client = kubefake.NewSimpleClientset(test.initialSecret)
+			}
+
+			c := &SigningRotation{
+				Namespace:     "ns",
+				Name:          "signer",
+				Validity:      24 * time.Hour,
+				Refresh:       12 * time.Hour,
+				Client:        client.CoreV1(),
+				Lister:        corev1listers.NewSecretLister(indexer),
+				EventRecorder: events.NewInMemoryRecorder("test"),
+			}
+
+			_, err := c.ensureSigningCertKeyPair()
+			switch {
+			case err != nil && len(test.expectedError) == 0:
+				t.Error(err)
+			case err != nil && !strings.Contains(err.Error(), test.expectedError):
+				t.Error(err)
+			case err == nil && len(test.expectedError) != 0:
+				t.Errorf("missing %q", test.expectedError)
+			}
+
+			test.verifyActions(t, client)
+		})
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go
new file mode 100644
index 00000000000..c768c6c91b3
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target.go
@@ -0,0 +1,278 @@
+package certrotation
+
+import (
+	"crypto/x509"
+	"fmt"
+	"strings"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apiserver/pkg/authentication/user"
+
+	"github.com/openshift/library-go/pkg/certs"
+	"github.com/openshift/library-go/pkg/crypto"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	corev1informers "k8s.io/client-go/informers/core/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	corev1listers "k8s.io/client-go/listers/core/v1"
+)
+
+// TargetRotation rotates a key and cert signed by a CA. It creates a new one when
+// a given percentage of the lifetime of the old cert has passed, or if the common name of the CA changes.
+type TargetRotation struct {
+	Namespace string
+	Name      string
+	Validity  time.Duration
+	Refresh   time.Duration
+	// RefreshOnlyWhenExpired allows rotating only certs that are already expired (for autorecovery).
+	// If false (the regular flow), it rotates at the refresh interval, but no later than 4/5 of the cert lifetime.
+	RefreshOnlyWhenExpired bool
+
+	CertCreator TargetCertCreator
+
+	Informer      corev1informers.SecretInformer
+	Lister        corev1listers.SecretLister
+	Client        corev1client.SecretsGetter
+	EventRecorder events.Recorder
+}
+
+type TargetCertCreator interface {
+	NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error)
+	NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration, refreshOnlyWhenExpired bool) string
+	// SetAnnotations gives an option to override or set additional annotations
+	SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string
+}
+
+type TargetCertRechecker interface {
+	RecheckChannel() <-chan struct{}
+}
+
+func (c TargetRotation) ensureTargetCertKeyPair(signingCertKeyPair *crypto.CA, caBundleCerts []*x509.Certificate) error {
+	// at this point our trust bundle has been updated. We don't know for sure that consumers have updated, but that's why we have a second
+	// validity percentage. We always check to see if we need to sign. Often we are signing with an old key or we have no target
+	// and need to mint one.
+	// TODO do the cross-signing thing, but this shows the API consumers want and a very simple impl.
+	originalTargetCertKeyPairSecret, err := c.Lister.Secrets(c.Namespace).Get(c.Name)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+	targetCertKeyPairSecret := originalTargetCertKeyPairSecret.DeepCopy()
+	if apierrors.IsNotFound(err) {
+		// create an empty one
+		targetCertKeyPairSecret = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: c.Namespace, Name: c.Name}}
+	}
+	targetCertKeyPairSecret.Type = corev1.SecretTypeTLS
+
+	if reason := needNewTargetCertKeyPair(targetCertKeyPairSecret.Annotations, signingCertKeyPair, caBundleCerts, c.Refresh, c.RefreshOnlyWhenExpired); len(reason) > 0 {
+		c.EventRecorder.Eventf("TargetUpdateRequired", "%q in %q requires a new target cert/key pair: %v", c.Name, c.Namespace, reason)
+		if err := setTargetCertKeyPairSecret(targetCertKeyPairSecret, c.Validity, signingCertKeyPair, c.CertCreator); err != nil {
+			return err
+		}
+
+		LabelAsManagedSecret(targetCertKeyPairSecret, CertificateTypeTarget)
+
+		actualTargetCertKeyPairSecret, _, err := resourceapply.ApplySecret(c.Client, c.EventRecorder, targetCertKeyPairSecret)
+		if err != nil {
+			return err
+		}
+		targetCertKeyPairSecret = actualTargetCertKeyPairSecret
+	}
+
+	return nil
+}
+
+func needNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration, refreshOnlyWhenExpired bool) string {
+	if reason := needNewTargetCertKeyPairForTime(annotations, signer, refresh, refreshOnlyWhenExpired); len(reason) > 0 {
+		return reason
+	}
+
+	// check the signer common name against all the common names in our ca bundle so we don't refresh early
+	signerCommonName := annotations[CertificateIssuer]
+	if len(signerCommonName) == 0 {
+		return "missing issuer name"
+	}
+	for _, caCert := range caBundleCerts {
+		if signerCommonName == caCert.Subject.CommonName {
+			return ""
+		}
+	}
+
+	return fmt.Sprintf("issuer %q, not in ca bundle:\n%s", signerCommonName, certs.CertificateBundleToString(caBundleCerts))
+}
+
+// needNewTargetCertKeyPairForTime returns a non-empty reason when
+//  1. notAfter or notBefore is missing from the annotations,
+//  2. notAfter or notBefore is malformed,
+//  3. now is after notAfter (the cert has expired),
+//  4. now is past 4/5 of the cert validity (its latest possible refresh time), or
+//  5. now is after notBefore+refresh AND the signer has been valid for more than
+//     10% of the refresh interval (so freshly rotated trust has had time to propagate).
+//
+// In other words, we rotate if our old CA is gone from the bundle (then we are pretty late to the renewal party),
+// or the cert expired (then we are also pretty late), or we are past the refresh point of the validity, but only
+// if the new CA is at least 10% of the refresh interval into its age.
+//
+// Hence, if the CAs are rotated too fast (with a CA refresh around 10% of its validity or smaller), we will not
+// see a signer old enough to make use of. And if the cert refresh is at 90% of its validity, there is not much time either.
+//
+// So with a cert refresh at 75% and equally long CA and cert validities, in the worst case we start renewing at
+// 85% of the cert validity, trying again every minute.
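To make the timing rules concrete, a small sketch against the helper defined just below; `now` and `signer` stand in for test context like the fixtures in the tests above, and all values are arbitrary:

```go
// A target cert valid for 100 minutes with refresh set to 75 minutes:
//   - before minute 75: no rotation, "" is returned
//   - after minute 75: rotation, but only once the signer is older than
//     7.5 minutes (refresh/10), so freshly rotated trust can propagate
//   - after minute 80 (4/5 of the validity): rotation regardless of signer age
//   - after minute 100: "already expired"
annotations := map[string]string{
	CertificateNotBeforeAnnotation: now.Add(-76 * time.Minute).Format(time.RFC3339),
	CertificateNotAfterAnnotation:  now.Add(24 * time.Minute).Format(time.RFC3339),
}
reason := needNewTargetCertKeyPairForTime(annotations, signer, 75*time.Minute, false)
// reason is "past its refresh time ..." provided the signer is old enough.
```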
+func needNewTargetCertKeyPairForTime(annotations map[string]string, signer *crypto.CA, refresh time.Duration, refreshOnlyWhenExpired bool) string { + notBefore, notAfter, reason := getValidityFromAnnotations(annotations) + if len(reason) > 0 { + return reason + } + + if time.Now().After(notAfter) { + return "already expired" + } + + if refreshOnlyWhenExpired { + return "" + } + + maxWait := notAfter.Sub(notBefore) / 5 + latestTime := notAfter.Add(-maxWait) + if time.Now().After(latestTime) { + return fmt.Sprintf("past its latest possible time %v", latestTime) + } + + // If Certificate is past its refresh time, we may have action to take. We only do this if the signer is old enough. + refreshTime := notBefore.Add(refresh) + if time.Now().After(refreshTime) { + // make sure the signer has been valid for more than 10% of the target's refresh time. + timeToWaitForTrustRotation := refresh / 10 + if time.Now().After(signer.Config.Certs[0].NotBefore.Add(time.Duration(timeToWaitForTrustRotation))) { + return fmt.Sprintf("past its refresh time %v", refreshTime) + } + } + + return "" +} + +// setTargetCertKeyPairSecret creates a new cert/key pair and sets them in the secret. Only one of client, serving, or signer rotation may be specified. +// TODO refactor with an interface for actually signing and move the one-of check higher in the stack. +func setTargetCertKeyPairSecret(targetCertKeyPairSecret *corev1.Secret, validity time.Duration, signer *crypto.CA, certCreator TargetCertCreator) error { + if targetCertKeyPairSecret.Annotations == nil { + targetCertKeyPairSecret.Annotations = map[string]string{} + } + if targetCertKeyPairSecret.Data == nil { + targetCertKeyPairSecret.Data = map[string][]byte{} + } + + // our annotation is based on our cert validity, so we want to make sure that we don't specify something past our signer + targetValidity := validity + remainingSignerValidity := signer.Config.Certs[0].NotAfter.Sub(time.Now()) + if remainingSignerValidity < validity { + targetValidity = remainingSignerValidity + } + + certKeyPair, err := certCreator.NewCertificate(signer, targetValidity) + if err != nil { + return err + } + + targetCertKeyPairSecret.Data["tls.crt"], targetCertKeyPairSecret.Data["tls.key"], err = certKeyPair.GetPEMBytes() + if err != nil { + return err + } + targetCertKeyPairSecret.Annotations[CertificateNotAfterAnnotation] = certKeyPair.Certs[0].NotAfter.Format(time.RFC3339) + targetCertKeyPairSecret.Annotations[CertificateNotBeforeAnnotation] = certKeyPair.Certs[0].NotBefore.Format(time.RFC3339) + targetCertKeyPairSecret.Annotations[CertificateIssuer] = certKeyPair.Certs[0].Issuer.CommonName + certCreator.SetAnnotations(certKeyPair, targetCertKeyPairSecret.Annotations) + + return nil +} + +type ClientRotation struct { + UserInfo user.Info +} + +func (r *ClientRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) { + return signer.MakeClientCertificateForDuration(r.UserInfo, validity) +} + +func (r *ClientRotation) NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration, refreshOnlyWhenExpired bool) string { + return needNewTargetCertKeyPair(annotations, signer, caBundleCerts, refresh, refreshOnlyWhenExpired) +} + +func (r *ClientRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string { + return annotations +} + +type ServingRotation struct { + Hostnames ServingHostnameFunc + CertificateExtensionFn 
[]crypto.CertificateExtensionFunc + HostnamesChanged <-chan struct{} +} + +func (r *ServingRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) { + if len(r.Hostnames()) == 0 { + return nil, fmt.Errorf("no hostnames set") + } + return signer.MakeServerCertForDuration(sets.NewString(r.Hostnames()...), validity, r.CertificateExtensionFn...) +} + +func (r *ServingRotation) RecheckChannel() <-chan struct{} { + return r.HostnamesChanged +} + +func (r *ServingRotation) NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration, refreshOnlyWhenExpired bool) string { + reason := needNewTargetCertKeyPair(annotations, signer, caBundleCerts, refresh, refreshOnlyWhenExpired) + if len(reason) > 0 { + return reason + } + + return r.missingHostnames(annotations) +} + +func (r *ServingRotation) missingHostnames(annotations map[string]string) string { + existingHostnames := sets.NewString(strings.Split(annotations[CertificateHostnames], ",")...) + requiredHostnames := sets.NewString(r.Hostnames()...) + if !existingHostnames.Equal(requiredHostnames) { + existingNotRequired := existingHostnames.Difference(requiredHostnames) + requiredNotExisting := requiredHostnames.Difference(existingHostnames) + return fmt.Sprintf("%q are existing and not required, %q are required and not existing", strings.Join(existingNotRequired.List(), ","), strings.Join(requiredNotExisting.List(), ",")) + } + + return "" +} + +func (r *ServingRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string { + hostnames := sets.String{} + for _, ip := range cert.Certs[0].IPAddresses { + hostnames.Insert(ip.String()) + } + for _, dnsName := range cert.Certs[0].DNSNames { + hostnames.Insert(dnsName) + } + + // List does a sort so that we have a consistent representation + annotations[CertificateHostnames] = strings.Join(hostnames.List(), ",") + return annotations +} + +type ServingHostnameFunc func() []string + +type SignerRotation struct { + SignerName string +} + +func (r *SignerRotation) NewCertificate(signer *crypto.CA, validity time.Duration) (*crypto.TLSCertificateConfig, error) { + signerName := fmt.Sprintf("%s_@%d", r.SignerName, time.Now().Unix()) + return crypto.MakeCAConfigForDuration(signerName, validity, signer) +} + +func (r *SignerRotation) NeedNewTargetCertKeyPair(annotations map[string]string, signer *crypto.CA, caBundleCerts []*x509.Certificate, refresh time.Duration, refreshOnlyWhenExpired bool) string { + return needNewTargetCertKeyPair(annotations, signer, caBundleCerts, refresh, refreshOnlyWhenExpired) +} + +func (r *SignerRotation) SetAnnotations(cert *crypto.TLSCertificateConfig, annotations map[string]string) map[string]string { + return annotations +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go new file mode 100644 index 00000000000..87ae13100d1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/target_test.go @@ -0,0 +1,436 @@ +package certrotation + +import ( + "crypto/x509/pkix" + "strings" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubefake "k8s.io/client-go/kubernetes/fake" + 
corev1listers "k8s.io/client-go/listers/core/v1" + clienttesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" +) + +func TestNeedNewTargetCertKeyPairForTime(t *testing.T) { + now := time.Now() + nowFn := func() time.Time { return now } + elevenMinutesBeforeNow := time.Now().Add(-11 * time.Minute) + elevenMinutesBeforeNowFn := func() time.Time { return elevenMinutesBeforeNow } + nowCert, err := newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: 200 * time.Minute}, nowFn) + if err != nil { + t.Fatal(err) + } + elevenMinutesBeforeNowCert, err := newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: 200 * time.Minute}, elevenMinutesBeforeNowFn) + if err != nil { + t.Fatal(err) + } + + tests := []struct { + name string + + annotations map[string]string + signerFn func() (*crypto.CA, error) + refresh time.Duration + refreshOnlyWhenExpired bool + + expected string + }{ + { + name: "from nothing", + signerFn: func() (*crypto.CA, error) { + return nowCert, nil + }, + refresh: 50 * time.Minute, + expected: "missing notAfter", + }, + { + name: "malformed", + annotations: map[string]string{ + CertificateNotAfterAnnotation: "malformed", + CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339), + }, + signerFn: func() (*crypto.CA, error) { + return nowCert, nil + }, + refresh: 50 * time.Minute, + expected: `bad expiry: "malformed"`, + }, + { + name: "past midpoint and cert is ready", + annotations: map[string]string{ + CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339), + CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339), + }, + signerFn: func() (*crypto.CA, error) { + return elevenMinutesBeforeNowCert, nil + }, + refresh: 40 * time.Minute, + expected: "past its refresh time", + }, + { + name: "past midpoint and cert is new", + annotations: map[string]string{ + CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339), + CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339), + }, + signerFn: func() (*crypto.CA, error) { + return nowCert, nil + }, + refresh: 40 * time.Minute, + expected: "", + }, + { + name: "past refresh but not expired", + annotations: map[string]string{ + CertificateNotAfterAnnotation: now.Add(45 * time.Minute).Format(time.RFC3339), + CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339), + }, + signerFn: func() (*crypto.CA, error) { + return nowCert, nil + }, + refresh: 40 * time.Minute, + refreshOnlyWhenExpired: true, + expected: "", + }, + { + name: "already expired", + annotations: map[string]string{ + CertificateNotAfterAnnotation: now.Add(-1 * time.Millisecond).Format(time.RFC3339), + CertificateNotBeforeAnnotation: now.Add(-45 * time.Minute).Format(time.RFC3339), + }, + signerFn: func() (*crypto.CA, error) { + return nowCert, nil + }, + refresh: 30 * time.Minute, + refreshOnlyWhenExpired: true, + expected: "already expired", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + signer, err := test.signerFn() + if err != nil { + t.Fatal(err) + } + + actual := needNewTargetCertKeyPairForTime(test.annotations, signer, test.refresh, test.refreshOnlyWhenExpired) + if !strings.HasPrefix(actual, test.expected) { + t.Errorf("expected %v, got %v", test.expected, actual) + } + }) + } +} + +func TestEnsureTargetCertKeyPair(t *testing.T) { + tests := []struct { + name string + + initialSecretFn func() 
*corev1.Secret + caFn func() (*crypto.CA, error) + + verifyActions func(t *testing.T, client *kubefake.Clientset) + expectedError string + }{ + { + name: "initial create", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialSecretFn: func() *corev1.Secret { return nil }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[0].Matches("get", "secrets") { + t.Error(actions[0]) + } + if !actions[1].Matches("create", "secrets") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.Secret) + if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { + t.Error(actual.Data) + } + }, + }, + { + name: "update write", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialSecretFn: func() *corev1.Secret { + caBundleSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "target-secret"}, + Data: map[string][]byte{}, + } + return caBundleSecret + }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[1].Matches("update", "secrets") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.Secret) + if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { + t.Error(actual.Data) + } + if actual.Annotations[CertificateHostnames] != "bar,foo" { + t.Error(actual.Annotations[CertificateHostnames]) + } + + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + + client := kubefake.NewSimpleClientset() + if startingObj := test.initialSecretFn(); startingObj != nil { + indexer.Add(startingObj) + client = kubefake.NewSimpleClientset(startingObj) + } + + c := &TargetRotation{ + Namespace: "ns", + Validity: 24 * time.Hour, + Refresh: 12 * time.Hour, + Name: "target-secret", + CertCreator: &ServingRotation{ + Hostnames: func() []string { return []string{"foo", "bar"} }, + }, + + Client: client.CoreV1(), + Lister: corev1listers.NewSecretLister(indexer), + EventRecorder: events.NewInMemoryRecorder("test"), + } + + newCA, err := test.caFn() + if err != nil { + t.Fatal(err) + } + err = c.ensureTargetCertKeyPair(newCA, newCA.Config.Certs) + switch { + case err != nil && len(test.expectedError) == 0: + t.Error(err) + case err != nil && !strings.Contains(err.Error(), test.expectedError): + t.Error(err) + case err == nil && len(test.expectedError) != 0: + t.Errorf("missing %q", test.expectedError) + } + + test.verifyActions(t, client) + }) + } +} + +func TestServerHostnameCheck(t *testing.T) { + tests := []struct { + name string + + existingHostnames string + requiredHostnames []string + + expected string + }{ + { + name: "nothing", + existingHostnames: "", + requiredHostnames: []string{"foo"}, + expected: `"" are existing and not required, "foo" are required and not existing`, + }, + { + name: "exists", + existingHostnames: "foo", + requiredHostnames: []string{"foo"}, + expected: "", + }, + { + name: "hasExtra", + 
existingHostnames: "foo,bar", + requiredHostnames: []string{"foo"}, + expected: `"bar" are existing and not required, "" are required and not existing`, + }, + { + name: "needsAnother", + existingHostnames: "foo", + requiredHostnames: []string{"foo", "bar"}, + expected: `"" are existing and not required, "bar" are required and not existing`, + }, + { + name: "both", + existingHostnames: "foo,baz", + requiredHostnames: []string{"foo", "bar"}, + expected: `"baz" are existing and not required, "bar" are required and not existing`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + r := &ServingRotation{ + Hostnames: func() []string { return test.requiredHostnames }, + } + actual := r.missingHostnames(map[string]string{CertificateHostnames: test.existingHostnames}) + if actual != test.expected { + t.Fatal(actual) + } + }) + } +} + +func TestEnsureTargetSignerCertKeyPair(t *testing.T) { + tests := []struct { + name string + + initialSecretFn func() *corev1.Secret + caFn func() (*crypto.CA, error) + + verifyActions func(t *testing.T, client *kubefake.Clientset) + expectedError string + }{ + { + name: "initial create", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialSecretFn: func() *corev1.Secret { return nil }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[0].Matches("get", "secrets") { + t.Error(actions[0]) + } + if !actions[1].Matches("create", "secrets") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.Secret) + if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { + t.Error(actual.Data) + } + + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeTarget { + t.Errorf("expected certificate type 'target', got: %v", certType) + } + + signingCertKeyPair, err := crypto.GetCAFromBytes(actual.Data["tls.crt"], actual.Data["tls.key"]) + if err != nil { + t.Error(actual.Data) + } + if signingCertKeyPair.Config.Certs[0].Issuer.CommonName != "signer-tests" { + t.Error(signingCertKeyPair.Config.Certs[0].Issuer.CommonName) + + } + if signingCertKeyPair.Config.Certs[1].Subject.CommonName != "signer-tests" { + t.Error(signingCertKeyPair.Config.Certs[0].Issuer.CommonName) + } + }, + }, + { + name: "update write", + caFn: func() (*crypto.CA, error) { + return newTestCACertificate(pkix.Name{CommonName: "signer-tests"}, int64(1), metav1.Duration{Duration: time.Hour * 24 * 60}, time.Now) + }, + initialSecretFn: func() *corev1.Secret { + caBundleSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "target-secret"}, + Data: map[string][]byte{}, + } + return caBundleSecret + }, + verifyActions: func(t *testing.T, client *kubefake.Clientset) { + actions := client.Actions() + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + + if !actions[1].Matches("update", "secrets") { + t.Error(actions[1]) + } + + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.Secret) + if len(actual.Data["tls.crt"]) == 0 || len(actual.Data["tls.key"]) == 0 { + t.Error(actual.Data) + } + if certType, _ := CertificateTypeFromObject(actual); certType != CertificateTypeTarget { + t.Errorf("expected certificate type 'target', got: %v", certType) + } + + signingCertKeyPair, err := 
crypto.GetCAFromBytes(actual.Data["tls.crt"], actual.Data["tls.key"]) + if err != nil { + t.Error(actual.Data) + } + if signingCertKeyPair.Config.Certs[0].Issuer.CommonName != "signer-tests" { + t.Error(signingCertKeyPair.Config.Certs[0].Issuer.CommonName) + + } + if signingCertKeyPair.Config.Certs[1].Subject.CommonName != "signer-tests" { + t.Error(signingCertKeyPair.Config.Certs[0].Issuer.CommonName) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + + client := kubefake.NewSimpleClientset() + if startingObj := test.initialSecretFn(); startingObj != nil { + indexer.Add(startingObj) + client = kubefake.NewSimpleClientset(startingObj) + } + + c := &TargetRotation{ + Namespace: "ns", + Validity: 24 * time.Hour, + Refresh: 12 * time.Hour, + Name: "target-secret", + CertCreator: &SignerRotation{ + SignerName: "lower-signer", + }, + + Client: client.CoreV1(), + Lister: corev1listers.NewSecretLister(indexer), + EventRecorder: events.NewInMemoryRecorder("test"), + } + + newCA, err := test.caFn() + if err != nil { + t.Fatal(err) + } + err = c.ensureTargetCertKeyPair(newCA, newCA.Config.Certs) + switch { + case err != nil && len(test.expectedError) == 0: + t.Error(err) + case err != nil && !strings.Contains(err.Error(), test.expectedError): + t.Error(err) + case err == nil && len(test.expectedError) != 0: + t.Errorf("missing %q", test.expectedError) + } + + test.verifyActions(t, client) + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt new file mode 100644 index 00000000000..b6140c7abb8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-expired.crt @@ -0,0 +1,14 @@ +-----BEGIN CERTIFICATE----- +MIICMjCCAdmgAwIBAgIUdTpx2/qycBZJltbEdfTyfKyJjG0wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTIwODAwWhcN +MTgwNzMwMTIwOTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABMlJR5tWK7vgCytCxBQov1xNp+R9RG2wI1w9 +SXIn+Za97Nf6krdyUDd+P6QSSJDkRTQZDsGiCpJhgd5kAzFNUkajgZgwgZUwDgYD +VR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAw +HQYDVR0OBBYEFOERFpshmIXspqXoox9gnSFGmm3PMB8GA1UdIwQYMBaAFCtdC7xd +NJKjmyiwhZJH7LBLOLrgMCAGA1UdEQQZMBeCFWV0Y2Rwcm94eS10ZXN0cy5sb2Nh +bDAKBggqhkjOPQQDAgNHADBEAiAvsq9L5uk0jg3v2z1xemAUwPXrEIAcbJhXFfC0 +QmVGGgIgFT9d/inKJcm/NfAgDGkoXSvHGv0NKAZpR32Dqriobh4= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt new file mode 100644 index 00000000000..b321982a740 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls-multiple.crt @@ -0,0 +1,39 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUQ0hq1Lmd6ujao+8Iy6LfpMdyNI8wCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMDAwWhcN +MjMwNzI5MTExMDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG 
+ByqGSM49AgEGCCqGSM49AwEHA0IABGoowUY2eQdvaHG4S/UMYD6mjs6/P7mmhizl +KWO03gq2eVSsbiYAnCJok3o2WQ01GtcS6bOUJ1DOG0gLTRfQ/lWjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBQmqCeN+suT +0JjgSxtCqTln7zonHjAfBgNVHSMEGDAWgBQmqCeN+suT0JjgSxtCqTln7zonHjAK +BggqhkjOPQQDAgNIADBFAiAUKV8vkiIoCiqtHQsp3PrUUV3He2B9K1tQgA8loTa+ +IQIhANPbCDVoPSFsX0I5iG/DQl/MmTo/tlsmNkN99j1j2JIM +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUU8ZsD37pcA1UYkgwhR6d/KjdGeAwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABLupsOF50q6GE7z2US77t5iLGe9wdOFwHssC +jUjCEGvJ/d2sGMxdiABJrrB8gau6TilrJCy9ZTYj56fzdReUnsKjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBRhaKyklrnI +wd2kg84t1D8CvDVtdjAfBgNVHSMEGDAWgBRhaKyklrnIwd2kg84t1D8CvDVtdjAK +BggqhkjOPQQDAgNIADBFAiAOCYqtOamRapNc+XxR7IFzlr7Si7EvjQ+ej5SKHb7g +rgIhAIBd1dtMc0KJSFsoxnQZailkFi5Nlea2eHU1wEDKVb40 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/zCCAaagAwIBAgIUVCSMefpK8uxDKy87jKnwc97DseIwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTExMTAwWhcN +MjMwNzI5MTExMTAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABOhGVuxW0nEQ5REqQdRF1eJ7OUOdXB/oDJed +Jr1ezcyhJyCRvD9DfadSBvMHFyzw7ssBIIMm4C3Eufj96M3tSACjZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBTLR9qOF3Hh +if8KUbkrRYUK13xSSDAfBgNVHSMEGDAWgBTLR9qOF3Hhif8KUbkrRYUK13xSSDAK +BggqhkjOPQQDAgNHADBEAiAFD2zRXnp40wVeffwpkU+ToFF6Nts/HJk02iMr/+km +RgIgRLZxonlkyLlUHucMKC2V+4UJ9akEbu/bhCXKuQb2DgY= +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt new file mode 100644 index 00000000000..862bdbc2df0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.crt @@ -0,0 +1,13 @@ +-----BEGIN CERTIFICATE----- +MIICADCCAaagAwIBAgIUWke4fSfaCH+2MLSFeTHBpoi+h1YwCgYIKoZIzj0EAwIw +TDELMAkGA1UEBhMCVVMxFjAUBgNVBAgTDVNhbiBGcmFuY2lzY28xCzAJBgNVBAcT +AkNBMRgwFgYDVQQDEw9ldGNkcHJveHktdGVzdHMwHhcNMTgwNzMwMTA1MDAwWhcN +MjMwNzI5MTA1MDAwWjBMMQswCQYDVQQGEwJVUzEWMBQGA1UECBMNU2FuIEZyYW5j +aXNjbzELMAkGA1UEBxMCQ0ExGDAWBgNVBAMTD2V0Y2Rwcm94eS10ZXN0czBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABHoqBfTXFdWRATfdrr/v5UriZBxmzL5aiwLZ +VRUg2UZNnoH2JLUcDkqx3IQakjoVijweiQeqxAai3mxjtgxbh+ajZjBkMA4GA1Ud +DwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgECMB0GA1UdDgQWBBSgDE3RpOiH +Gv7AEnYKRk46zVIkbzAfBgNVHSMEGDAWgBSgDE3RpOiHGv7AEnYKRk46zVIkbzAK +BggqhkjOPQQDAgNIADBFAiA3Gg/gwiEfjclpQYyd3qTgdCWzud8GKRdjVK3Z2BXW +swIhANMuxi0Y41mwcmh3a2icpdeGHGyGNdNDe8uF+5csuNUp +-----END CERTIFICATE----- diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key new file mode 100644 index 00000000000..83cf18be622 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/testfiles/tls.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIC+UyR59JEbt/qjWZG/87ZYzk0pOgTBmpx5R0w6uG66JoAoGCCqGSM49 
+AwEHoUQDQgAEeioF9NcV1ZEBN92uv+/lSuJkHGbMvlqLAtlVFSDZRk2egfYktRwO
+SrHchBqSOhWKPB6JB6rEBqLebGO2DFuH5g==
+-----END EC PRIVATE KEY-----
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go
new file mode 100644
index 00000000000..def0d6a9033
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go
@@ -0,0 +1,66 @@
+package condition
+
+const (
+	// ManagementStateDegradedConditionType is true when the operator ManagementState is not "Managed".
+	// Possible reasons are Unmanaged, Removed or Unknown. Any of these cases means the operator is not actively managing the operand.
+	// This condition is set to false when the ManagementState is set back to "Managed".
+	ManagementStateDegradedConditionType = "ManagementStateDegraded"
+
+	// UnsupportedConfigOverridesUpgradeableConditionType is true when the operator's unsupported config overrides change.
+	// The NoUnsupportedConfigOverrides reason means there are no unsupported config overrides.
+	// The UnsupportedConfigOverridesSet reason means unsupported config overrides are set, which might impact the ability
+	// of the operator to successfully upgrade its operand.
+	UnsupportedConfigOverridesUpgradeableConditionType = "UnsupportedConfigOverridesUpgradeable"
+
+	// MonitoringResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the ServiceMonitor
+	// CR resource, which the monitoring operator requires to collect Prometheus data from the operator. When this condition is true and the ServiceMonitor
+	// was already created, metrics collection is not affected. However, if the ServiceMonitor was not created, the metrics won't be available for
+	// collection until this condition is set to false.
+	// The condition is set to false automatically when the operator successfully synchronizes the ServiceMonitor resource.
+	MonitoringResourceControllerDegradedConditionType = "MonitoringResourceControllerDegraded"
+
+	// BackingResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the resources needed
+	// to successfully run the installer pods (installer CRB and SA). If these were already created, this condition is not fatal; however, if the resources
+	// were not created, installer pod creation will fail.
+	// This condition is set to false when the operator can successfully synchronize the installer SA and CRB.
+	BackingResourceControllerDegradedConditionType = "BackingResourceControllerDegraded"
+
+	// StaticPodsDegradedConditionType is true when the operator observes errors when installing the new-revision static pods.
+	// This condition reports the Error reason when the pods are terminated, not ready, or waiting, during which time the operand quality of service is degraded.
+	// This condition is set to False when the pods change state to running and are observed ready.
+	StaticPodsDegradedConditionType = "StaticPodsDegraded"
+
+	// ConfigObservationDegradedConditionType is true when the operator failed to observe or process a configuration change.
+	// This is not a transient condition; normally a correction or manual intervention on the config custom resource is required.
+	ConfigObservationDegradedConditionType = "ConfigObservationDegraded"
+
+	// ResourceSyncControllerDegradedConditionType is true when the operator failed to synchronize one or more secrets or config maps required
+	// to run the operand. The operand's ability to provide service might be affected by this condition.
+	// This condition is set to false when the operator is able to create the secrets and config maps.
+	ResourceSyncControllerDegradedConditionType = "ResourceSyncControllerDegraded"
+
+	// CertRotationDegradedConditionTypeFmt is true when the operator failed to properly rotate one or more certificates required by the operand.
+	// The RotationError reason is given with a message describing the details of this failure. Ignoring this condition can be fatal: the existing
+	// certificate(s) can expire, and without rotating/renewing them, manual recovery might be required to fix the cluster.
+	CertRotationDegradedConditionTypeFmt = "CertRotation_%s_Degraded"
+
+	// InstallerControllerDegradedConditionType is true when the operator is not able to create new installer pods, so new revisions
+	// cannot be rolled out. This might happen when one or more required secrets or config maps do not exist.
+	// Once the missing secret or config map becomes available, this condition is automatically set to false.
+	InstallerControllerDegradedConditionType = "InstallerControllerDegraded"
+
+	// NodeInstallerDegradedConditionType is true when the operator is not able to create new installer pods because there are no schedulable nodes
+	// available to run the installer pods.
+	// The AllNodesAtLatestRevision reason is set when all master nodes are updated to the latest revision; it is false when some masters are pending revision.
+	// The ZeroNodesActive reason is set to True when no active master nodes are observed and to False when there is at least one active master node.
+	NodeInstallerDegradedConditionType = "NodeInstallerDegraded"
+
+	// RevisionControllerDegradedConditionType is true when the operator is not able to create a new desired revision because an error occurred when
+	// the operator attempted to create the required resource(s) (secrets, configmaps, ...).
+	// This condition means no new revision will be created.
+	RevisionControllerDegradedConditionType = "RevisionControllerDegraded"
+
+	// NodeControllerDegradedConditionType is true when the operator observes a master node that is not ready.
+	// Note that a node is not ready when its Condition.NodeReady is not set to true
+	NodeControllerDegradedConditionType = "NodeControllerDegraded"
+)
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile.go
new file mode 100644
index 00000000000..f38f0816860
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile.go
@@ -0,0 +1,104 @@
+package apiserver
+
+import (
+	"fmt"
+	"reflect"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/client-go/tools/cache"
+
+	configv1 "github.com/openshift/api/config/v1"
+	configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"
+
+	"github.com/openshift/library-go/pkg/crypto"
+	"github.com/openshift/library-go/pkg/operator/configobserver"
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+// APIServerLister lists APIServer information and allows resources to be synced
+type APIServerLister interface {
+	APIServerLister() configlistersv1.APIServerLister
+	PreRunHasSynced() []cache.InformerSynced
+}
+
+// ObserveTLSSecurityProfile observes the APIServer.Spec.TLSSecurityProfile field and sets
+// the ServingInfo.MinTLSVersion and ServingInfo.CipherSuites fields
+func ObserveTLSSecurityProfile(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+	var (
+		minTlSVersionPath = []string{"servingInfo", "minTLSVersion"}
+		cipherSuitesPath  = []string{"servingInfo", "cipherSuites"}
+	)
+
+	listers := genericListers.(APIServerLister)
+	errs := []error{}
+
+	currentMinTLSVersion, _, versionErr := unstructured.NestedString(existingConfig, minTlSVersionPath...)
+	if versionErr != nil {
+		errs = append(errs, fmt.Errorf("failed to retrieve spec.servingInfo.minTLSVersion: %v", versionErr))
+	}
+
+	currentCipherSuites, _, suitesErr := unstructured.NestedStringSlice(existingConfig, cipherSuitesPath...)
+ if suitesErr != nil { + errs = append(errs, fmt.Errorf("failed to retrieve spec.servingInfo.cipherSuites: %v", suitesErr)) + } + + apiServer, err := listers.APIServerLister().Get("cluster") + if errors.IsNotFound(err) { + klog.Warningf("apiserver.config.openshift.io/cluster: not found") + apiServer = &configv1.APIServer{} + } else if err != nil { + return existingConfig, append(errs, err) + } + + observedConfig := map[string]interface{}{ + "servingInfo": map[string]interface{}{}, + } + observedMinTLSVersion, observedCipherSuites := getSecurityProfileCiphers(apiServer.Spec.TLSSecurityProfile) + if err = unstructured.SetNestedField(observedConfig, observedMinTLSVersion, minTlSVersionPath...); err != nil { + errs = append(errs, err) + } + if err = unstructured.SetNestedStringSlice(observedConfig, observedCipherSuites, cipherSuitesPath...); err != nil { + errs = append(errs, err) + } + + if observedMinTLSVersion != currentMinTLSVersion { + recorder.Eventf("ObserveTLSSecurityProfile", "minTLSVersion changed to %s", observedMinTLSVersion) + } + if !reflect.DeepEqual(observedCipherSuites, currentCipherSuites) { + recorder.Eventf("ObserveTLSSecurityProfile", "cipherSuites changed to %q", observedCipherSuites) + } + + return observedConfig, errs +} + +// Extracts the minimum TLS version and cipher suites from TLSSecurityProfile object, +// Converts the ciphers to IANA names as supported by Kube ServingInfo config. +// If profile is nil, returns config defined by the Intermediate TLS Profile +func getSecurityProfileCiphers(profile *configv1.TLSSecurityProfile) (string, []string) { + var profileType configv1.TLSProfileType + if profile == nil { + profileType = configv1.TLSProfileIntermediateType + } else { + profileType = profile.Type + } + + var profileSpec *configv1.TLSProfileSpec + if profileType == configv1.TLSProfileCustomType { + if profile.Custom != nil { + profileSpec = &profile.Custom.TLSProfileSpec + } + } else { + profileSpec = configv1.TLSProfiles[profileType] + } + + // nothing found / custom type set but no actual custom spec + if profileSpec == nil { + profileSpec = configv1.TLSProfiles[configv1.TLSProfileIntermediateType] + } + + // need to remap all Ciphers to their respective IANA names used by Go + return string(profileSpec.MinTLSVersion), crypto.OpenSSLToIANACipherSuites(profileSpec.Ciphers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile_test.go new file mode 100644 index 00000000000..3fc76278706 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/apiserver/observe_tlssecurityprofile_test.go @@ -0,0 +1,117 @@ +package apiserver + +import ( + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type testLister struct { + lister configlistersv1.APIServerLister +} + +func (l testLister) APIServerLister() configlistersv1.APIServerLister { + return l.lister +} + +func (l testLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + 
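+// Example wiring (illustrative sketch; operatorClient, recorder, listers, and ctx stand in
+// for the operator's own instances and are not defined in this package):
+//
+//	observer := configobserver.NewConfigObserver(
+//		operatorClient, recorder, listers,
+//		apiserver.ObserveTLSSecurityProfile,
+//	)
+//	go observer.Run(ctx, 1)
+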
+func (l testLister) PreRunHasSynced() []cache.InformerSynced { + return nil +} +func TestObserveTLSSecurityProfile(t *testing.T) { + existingConfig := map[string]interface{}{ + "minTLSVersion": "VersionTLS11", + "cipherSuites": []string{"DES-CBC3-SHA"}, + } + + tests := []struct { + name string + config *configv1.TLSSecurityProfile + existing map[string]interface{} + expectedMinTLSVersion string + expectedSuites []string + }{ + { + name: "NoAPIServerConfig", + config: nil, + existing: existingConfig, + expectedMinTLSVersion: "VersionTLS12", + expectedSuites: crypto.OpenSSLToIANACipherSuites(configv1.TLSProfiles[configv1.TLSProfileIntermediateType].Ciphers), + }, + { + name: "ModernCrypto", + config: &configv1.TLSSecurityProfile{ + Type: configv1.TLSProfileModernType, + Modern: &configv1.ModernTLSProfile{}, + }, + existing: existingConfig, + expectedMinTLSVersion: "VersionTLS13", + expectedSuites: crypto.OpenSSLToIANACipherSuites(configv1.TLSProfiles[configv1.TLSProfileModernType].Ciphers), + }, + { + name: "OldCrypto", + config: &configv1.TLSSecurityProfile{ + Type: configv1.TLSProfileOldType, + Old: &configv1.OldTLSProfile{}, + }, + existing: existingConfig, + expectedMinTLSVersion: "VersionTLS10", + expectedSuites: crypto.OpenSSLToIANACipherSuites(configv1.TLSProfiles[configv1.TLSProfileOldType].Ciphers), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if tt.config != nil { + if err := indexer.Add(&configv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: configv1.APIServerSpec{ + TLSSecurityProfile: tt.config, + }, + }); err != nil { + t.Fatal(err) + } + } + listers := testLister{ + lister: configlistersv1.NewAPIServerLister(indexer), + } + + result, errs := ObserveTLSSecurityProfile(listers, events.NewInMemoryRecorder(t.Name()), tt.existing) + if len(errs) > 0 { + t.Errorf("expected 0 errors, got %v", errs) + } + + gotMinTLSVersion, _, err := unstructured.NestedString(result, "servingInfo", "minTLSVersion") + if err != nil { + t.Errorf("couldn't get minTLSVersion from the returned object: %v", err) + } + + gotSuites, _, err := unstructured.NestedStringSlice(result, "servingInfo", "cipherSuites") + if err != nil { + t.Errorf("couldn't get cipherSuites from the returned object: %v", err) + } + + if !reflect.DeepEqual(gotSuites, tt.expectedSuites) { + t.Errorf("ObserveTLSSecurityProfile() got cipherSuites = %v, expected %v", gotSuites, tt.expectedSuites) + } + if gotMinTLSVersion != tt.expectedMinTLSVersion { + t.Errorf("ObserveTLSSecurityProfile() got minTlSVersion = %v, expected %v", gotMinTLSVersion, tt.expectedMinTLSVersion) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go new file mode 100644 index 00000000000..f0e5c252f36 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go @@ -0,0 +1,163 @@ +package cloudprovider + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" + 
"github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" +) + +const ( + cloudProviderConfFilePath = "/etc/kubernetes/static-pod-resources/configmaps/cloud-config/%s" + configNamespace = "openshift-config" +) + +// InfrastructureLister lists infrastrucre information and allows resources to be synced +type InfrastructureLister interface { + InfrastructureLister() configlistersv1.InfrastructureLister + ResourceSyncer() resourcesynccontroller.ResourceSyncer +} + +// NewCloudProviderObserver returns a new cloudprovider observer for syncing cloud provider specific +// information to controller-manager and api-server. +func NewCloudProviderObserver(targetNamespaceName string, cloudProviderNamePath, cloudProviderConfigPath []string) configobserver.ObserveConfigFunc { + cloudObserver := &cloudProviderObserver{ + targetNamespaceName: targetNamespaceName, + cloudProviderNamePath: cloudProviderNamePath, + cloudProviderConfigPath: cloudProviderConfigPath, + } + return cloudObserver.ObserveCloudProviderNames +} + +type cloudProviderObserver struct { + targetNamespaceName string + cloudProviderNamePath []string + cloudProviderConfigPath []string +} + +// ObserveCloudProviderNames observes the cloud provider from the global cluster infrastructure resource. +func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + listers := genericListers.(InfrastructureLister) + var errs []error + cloudProvidersPath := c.cloudProviderNamePath + cloudProviderConfPath := c.cloudProviderConfigPath + previouslyObservedConfig := map[string]interface{}{} + + existingCloudConfig, _, err := unstructured.NestedStringSlice(existingConfig, cloudProviderConfPath...) + if err != nil { + return previouslyObservedConfig, append(errs, err) + } + + if currentCloudProvider, _, _ := unstructured.NestedStringSlice(existingConfig, cloudProvidersPath...); len(currentCloudProvider) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, currentCloudProvider, cloudProvidersPath...); err != nil { + errs = append(errs, err) + } + } + + if len(existingCloudConfig) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, existingCloudConfig, cloudProviderConfPath...); err != nil { + errs = append(errs, err) + } + } + + observedConfig := map[string]interface{}{} + + infrastructure, err := listers.InfrastructureLister().Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveCloudProviderNames", "Required infrastructures.%s/cluster not found", configv1.GroupName) + return observedConfig, errs + } + if err != nil { + errs = append(errs, err) + return previouslyObservedConfig, errs + } + + cloudProvider := getPlatformName(infrastructure.Status.Platform, recorder) + if len(cloudProvider) > 0 { + if err := unstructured.SetNestedStringSlice(observedConfig, []string{cloudProvider}, cloudProvidersPath...); err != nil { + errs = append(errs, err) + } + } + + sourceCloudConfigMap := infrastructure.Spec.CloudConfig.Name + sourceCloudConfigNamespace := configNamespace + sourceLocation := resourcesynccontroller.ResourceLocation{ + Namespace: sourceCloudConfigNamespace, + Name: sourceCloudConfigMap, + } + + // we set cloudprovider configmap values only for some cloud providers. 
+	validCloudProviders := sets.NewString("azure", "gce", "openstack", "vsphere")
+	if !validCloudProviders.Has(cloudProvider) {
+		sourceCloudConfigMap = ""
+	}
+
+	if len(sourceCloudConfigMap) == 0 {
+		sourceLocation = resourcesynccontroller.ResourceLocation{}
+	}
+
+	err = listers.ResourceSyncer().SyncConfigMap(
+		resourcesynccontroller.ResourceLocation{
+			Namespace: c.targetNamespaceName,
+			Name:      "cloud-config",
+		},
+		sourceLocation)
+
+	if err != nil {
+		errs = append(errs, err)
+		return observedConfig, errs
+	}
+
+	if len(sourceCloudConfigMap) == 0 {
+		return observedConfig, errs
+	}
+
+	// the key is usually simply "config", but use the configured key just in case
+	staticCloudConfFile := fmt.Sprintf(cloudProviderConfFilePath, infrastructure.Spec.CloudConfig.Key)
+
+	if err := unstructured.SetNestedStringSlice(observedConfig, []string{staticCloudConfFile}, cloudProviderConfPath...); err != nil {
+		recorder.Warningf("ObserveCloudProviderNames", "Failed setting cloud-config: %v", err)
+		errs = append(errs, err)
+	}
+
+	if !equality.Semantic.DeepEqual(existingCloudConfig, []string{staticCloudConfFile}) {
+		recorder.Eventf("ObserveCloudProviderNamesChanges", "CloudProvider config file changed to %s", staticCloudConfFile)
+	}
+
+	return observedConfig, errs
+}
+
+func getPlatformName(platformType configv1.PlatformType, recorder events.Recorder) string {
+	cloudProvider := ""
+	switch platformType {
+	case "":
+		recorder.Warningf("ObserveCloudProvidersFailed", "Required status.platform field is not set in infrastructures.%s/cluster", configv1.GroupName)
+	case configv1.AWSPlatformType:
+		cloudProvider = "aws"
+	case configv1.AzurePlatformType:
+		cloudProvider = "azure"
+	case configv1.VSpherePlatformType:
+		cloudProvider = "vsphere"
+	case configv1.BareMetalPlatformType:
+	case configv1.GCPPlatformType:
+		cloudProvider = "gce"
+	case configv1.LibvirtPlatformType:
+	case configv1.OpenStackPlatformType:
+		cloudProvider = "openstack"
+	case configv1.NonePlatformType:
+	default:
+		// the new doc on the infrastructure fields requires that we treat an unrecognized platform the same as bare metal.
+ // TODO find a way to indicate to the user that we didn't honor their choice + recorder.Warningf("ObserveCloudProvidersFailed", fmt.Sprintf("No recognized cloud provider platform found in infrastructures.%s/cluster.status.platform", configv1.GroupName)) + } + return cloudProvider +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go new file mode 100644 index 00000000000..1260ae3a68e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go @@ -0,0 +1,110 @@ +package cloudprovider + +import ( + "testing" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type FakeResourceSyncer struct{} + +func (fakeSyncer *FakeResourceSyncer) SyncConfigMap(destination, source resourcesynccontroller.ResourceLocation) error { + return nil +} + +func (fakeSyncer *FakeResourceSyncer) SyncSecret(destination, source resourcesynccontroller.ResourceLocation) error { + return nil +} + +type FakeInfrastructureLister struct { + InfrastructureLister_ configlistersv1.InfrastructureLister + ResourceSync resourcesynccontroller.ResourceSyncer + PreRunCachesSynced []cache.InformerSynced +} + +func (l FakeInfrastructureLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return l.ResourceSync +} + +func (l FakeInfrastructureLister) InfrastructureLister() configlistersv1.InfrastructureLister { + return l.InfrastructureLister_ +} + +func (l FakeInfrastructureLister) PreRunHasSynced() []cache.InformerSynced { + return l.PreRunCachesSynced +} + +func TestObserveCloudProviderNames(t *testing.T) { + cases := []struct { + platform configv1.PlatformType + expected string + cloudProviderCount int + }{{ + platform: configv1.AWSPlatformType, + expected: "aws", + cloudProviderCount: 1, + }, { + platform: configv1.AzurePlatformType, + expected: "azure", + cloudProviderCount: 1, + }, { + platform: configv1.BareMetalPlatformType, + cloudProviderCount: 0, + }, { + platform: configv1.LibvirtPlatformType, + cloudProviderCount: 0, + }, { + platform: configv1.OpenStackPlatformType, + expected: "openstack", + cloudProviderCount: 1, + }, { + platform: configv1.GCPPlatformType, + expected: "gce", + cloudProviderCount: 1, + }, { + platform: configv1.NonePlatformType, + cloudProviderCount: 0, + }, { + platform: "", + cloudProviderCount: 0, + }} + for _, c := range cases { + t.Run(string(c.platform), func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if err := indexer.Add(&configv1.Infrastructure{ObjectMeta: v1.ObjectMeta{Name: "cluster"}, Status: configv1.InfrastructureStatus{Platform: c.platform}}); err != nil { + t.Fatal(err.Error()) + } + listers := FakeInfrastructureLister{ + InfrastructureLister_: configlistersv1.NewInfrastructureLister(indexer), + ResourceSync: &FakeResourceSyncer{}, + } + cloudProvidersPath := []string{"extendedArguments", "cloud-provider"} + cloudProviderConfPath := []string{"extendedArguments", "cloud-config"} + observerFunc := 
NewCloudProviderObserver("kube-controller-manager", cloudProvidersPath, cloudProviderConfPath) + result, errs := observerFunc(listers, events.NewInMemoryRecorder("cloud"), map[string]interface{}{}) + if len(errs) > 0 { + t.Fatal(errs) + } + cloudProvider, _, err := unstructured.NestedSlice(result, "extendedArguments", "cloud-provider") + if err != nil { + t.Fatal(err) + } + if e, a := c.cloudProviderCount, len(cloudProvider); e != a { + t.Fatalf("expected len(cloudProvider) == %d, got %d", e, a) + } + if c.cloudProviderCount > 0 { + if e, a := c.expected, cloudProvider[0]; e != a { + t.Errorf("expected cloud-provider=%s, got %s", e, a) + } + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go new file mode 100644 index 00000000000..c14ca7cc806 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -0,0 +1,193 @@ +package configobserver + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/imdario/mergo" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/rand" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const configObserverWorkKey = "key" + +// Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type +type Listers interface { + // ResourceSyncer can be used to copy content from one namespace to another + ResourceSyncer() resourcesynccontroller.ResourceSyncer + PreRunHasSynced() []cache.InformerSynced +} + +// ObserveConfigFunc observes configuration and returns the observedConfig. This function should not return an +// observedConfig that would cause the service being managed by the operator to crash. For example, if a required +// configuration key cannot be observed, consider reusing the configuration key's previous value. Errors that occur +// while attempting to generate the observedConfig should be returned in the errs slice. +type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) + +type ConfigObserver struct { + + // observers are called in an undefined order and their results are merged to + // determine the observed configuration. 
+	observers []ObserveConfigFunc
+
+	operatorClient v1helpers.OperatorClient
+	// listers are used by config observers to retrieve necessary resources
+	listers Listers
+
+	queue         workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+}
+
+func NewConfigObserver(
+	operatorClient v1helpers.OperatorClient,
+	eventRecorder events.Recorder,
+	listers Listers,
+	observers ...ObserveConfigFunc,
+) *ConfigObserver {
+	return &ConfigObserver{
+		operatorClient: operatorClient,
+		eventRecorder:  eventRecorder.WithComponentSuffix("config-observer"),
+
+		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ConfigObserver"),
+
+		observers: observers,
+		listers:   listers,
+	}
+}
+
+// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This
+// must be information that is logically "owned" by another component.
+func (c ConfigObserver) sync() error {
+	originalSpec, _, _, err := c.operatorClient.GetOperatorState()
+	if err != nil {
+		return err
+	}
+	spec := originalSpec.DeepCopy()
+
+	// don't worry about errors. If we can't decode, we'll simply stomp over the field.
+	existingConfig := map[string]interface{}{}
+	if err := json.NewDecoder(bytes.NewBuffer(spec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil {
+		klog.V(4).Infof("decode of existing config failed with error: %v", err)
+	}
+
+	var errs []error
+	var observedConfigs []map[string]interface{}
+	for _, i := range rand.Perm(len(c.observers)) {
+		var currErrs []error
+		observedConfig, currErrs := c.observers[i](c.listers, c.eventRecorder, existingConfig)
+		observedConfigs = append(observedConfigs, observedConfig)
+		errs = append(errs, currErrs...)
+	}
+
+	mergedObservedConfig := map[string]interface{}{}
+	for _, observedConfig := range observedConfigs {
+		if err := mergo.Merge(&mergedObservedConfig, observedConfig); err != nil {
+			klog.Warningf("merging observed config failed: %v", err)
+		}
+	}
+
+	reverseMergedObservedConfig := map[string]interface{}{}
+	for i := len(observedConfigs) - 1; i >= 0; i-- {
+		if err := mergo.Merge(&reverseMergedObservedConfig, observedConfigs[i]); err != nil {
+			klog.Warningf("merging observed config failed: %v", err)
+		}
+	}
+
+	if !equality.Semantic.DeepEqual(mergedObservedConfig, reverseMergedObservedConfig) {
+		errs = append(errs, errors.New("non-deterministic config observation detected"))
+	}
+
+	if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) {
+		c.eventRecorder.Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig))
+		if _, _, err := v1helpers.UpdateSpec(c.operatorClient, v1helpers.UpdateObservedConfigFn(mergedObservedConfig)); err != nil {
+			// At this point we failed to write the updated config. If we are permanently broken, do not pile the errors from observers
+			// but instead reset the errors and only report a single error condition.
+ errs = []error{fmt.Errorf("error writing updated observed config: %v", err)} + c.eventRecorder.Warningf("ObservedConfigWriteError", "Failed to write observed config: %v", err) + } + } + configError := v1helpers.NewMultiLineAggregate(errs) + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: condition.ConfigObservationDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +func (c *ConfigObserver) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting ConfigObserver") + defer klog.Infof("Shutting down ConfigObserver") + if !cache.WaitForCacheSync(ctx.Done(), c.listers.PreRunHasSynced()...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // doesn't matter what workers say, only start one. + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *ConfigObserver) runWorker(_ context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *ConfigObserver) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *ConfigObserver) EventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(configObserverWorkKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(configObserverWorkKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(configObserverWorkKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go new file mode 100644 index 00000000000..9641551f47a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go @@ -0,0 +1,285 @@ +package configobserver + +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" + + "github.com/davecgh/go-spew/spew" + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return nil +} + +func (c *fakeOperatorClient) GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error) { + return c.startingSpec, &operatorv1.OperatorStatus{}, "", nil + +} +func (c *fakeOperatorClient) UpdateOperatorSpec(rv string, in 
*operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + if c.specUpdateFailure != nil { + return &operatorv1.OperatorSpec{}, rv, c.specUpdateFailure + } + c.spec = in + return in, rv, c.specUpdateFailure +} +func (c *fakeOperatorClient) UpdateOperatorStatus(rv string, in *operatorv1.OperatorStatus) (status *operatorv1.OperatorStatus, err error) { + c.status = in + return in, nil +} + +type fakeOperatorClient struct { + startingSpec *operatorv1.OperatorSpec + specUpdateFailure error + + status *operatorv1.OperatorStatus + spec *operatorv1.OperatorSpec +} + +type fakeLister struct{} + +func (l *fakeLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + +func (l *fakeLister) PreRunHasSynced() []cache.InformerSynced { + return []cache.InformerSynced{ + func() bool { return true }, + } +} + +func TestSyncStatus(t *testing.T) { + testCases := []struct { + name string + fakeClient func() *fakeOperatorClient + observers []ObserveConfigFunc + + expectError bool + expectEvents [][]string + expectedObservedConfig *unstructured.Unstructured + expectedCondition *operatorv1.OperatorCondition + }{ + { + name: "HappyPath", + fakeClient: func() *fakeOperatorClient { + return &fakeOperatorClient{ + startingSpec: &operatorv1.OperatorSpec{}, + } + }, + expectEvents: [][]string{ + {"ObservedConfigChanged", "Writing updated observed config"}, + }, + observers: []ObserveConfigFunc{ + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"foo": "one"}, nil + }, + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"bar": "two"}, nil + }, + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"baz": "three"}, nil + }, + }, + + expectError: false, + expectedObservedConfig: &unstructured.Unstructured{Object: map[string]interface{}{ + "foo": "one", + "bar": "two", + "baz": "three", + }}, + expectedCondition: &operatorv1.OperatorCondition{ + Type: condition.ConfigObservationDegradedConditionType, + Status: operatorv1.ConditionFalse, + }, + }, + { + name: "MergeTwoOfThreeWithError", + fakeClient: func() *fakeOperatorClient { + return &fakeOperatorClient{ + startingSpec: &operatorv1.OperatorSpec{}, + } + }, + expectEvents: [][]string{ + {"ObservedConfigChanged", "Writing updated observed config"}, + }, + observers: []ObserveConfigFunc{ + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"foo": "one"}, nil + }, + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"bar": "two"}, nil + }, + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + errs = append(errs, fmt.Errorf("some failure")) + return observedConfig, errs + }, + }, + + expectError: true, + expectedObservedConfig: &unstructured.Unstructured{Object: map[string]interface{}{ + "foo": "one", + "bar": "two", + }}, + expectedCondition: &operatorv1.OperatorCondition{ + Type: 
condition.ConfigObservationDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: "some failure", + }, + }, + { + name: "TestUpdateFailed", + fakeClient: func() *fakeOperatorClient { + return &fakeOperatorClient{ + startingSpec: &operatorv1.OperatorSpec{}, + specUpdateFailure: fmt.Errorf("update spec failure"), + } + }, + expectEvents: [][]string{ + {"ObservedConfigChanged", "Writing updated observed config"}, + {"ObservedConfigWriteError", "Failed to write observed config: update spec failure"}, + }, + observers: []ObserveConfigFunc{ + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"foo": "one"}, nil + }, + }, + + expectError: true, + expectedObservedConfig: nil, + expectedCondition: &operatorv1.OperatorCondition{ + Type: condition.ConfigObservationDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: "error writing updated observed config: update spec failure", + }, + }, + { + name: "NonDeterministic", + fakeClient: func() *fakeOperatorClient { + return &fakeOperatorClient{ + startingSpec: &operatorv1.OperatorSpec{}, + } + }, + expectEvents: [][]string{ + {"ObservedConfigChanged", "Writing updated observed config"}, + }, + observers: []ObserveConfigFunc{ + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"level1": map[string]interface{}{"level2_c": []interface{}{"slice_entry_a"}}}, nil + }, + func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error) { + return map[string]interface{}{"level1": map[string]interface{}{"level2_c": []interface{}{"slice_entry_b"}}}, nil + }, + }, + + expectError: true, + expectedCondition: &operatorv1.OperatorCondition{ + Type: condition.ConfigObservationDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: "non-deterministic config observation detected", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + operatorConfigClient := tc.fakeClient() + eventClient := fake.NewSimpleClientset() + + configObserver := ConfigObserver{ + listers: &fakeLister{}, + operatorClient: operatorConfigClient, + observers: tc.observers, + eventRecorder: events.NewRecorder(eventClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}), + } + err := configObserver.sync() + if tc.expectError && err == nil { + t.Fatal("error expected") + } + if !tc.expectError && err != nil { + t.Fatal(err) + } + + observedEvents := [][]string{} + for _, action := range eventClient.Actions() { + if !action.Matches("create", "events") { + continue + } + event := action.(ktesting.CreateAction).GetObject().(*corev1.Event) + observedEvents = append(observedEvents, []string{event.Reason, event.Message}) + } + for i, event := range tc.expectEvents { + if observedEvents[i][0] != event[0] { + t.Errorf("expected %d event reason to be %q, got %q", i, event[0], observedEvents[i][0]) + } + if !strings.HasPrefix(observedEvents[i][1], event[1]) { + t.Errorf("expected %d event message to be %q, got %q", i, event[1], observedEvents[i][1]) + } + } + if len(tc.expectEvents) != len(observedEvents) { + t.Errorf("expected %d events, got %d (%#v)", len(tc.expectEvents), len(observedEvents), observedEvents) + } + + switch { + case 
tc.expectedObservedConfig != nil && operatorConfigClient.spec == nil: + t.Error("missing expected spec") + case tc.expectedObservedConfig != nil: + if !reflect.DeepEqual(tc.expectedObservedConfig, operatorConfigClient.spec.ObservedConfig.Object) { + t.Errorf("\n===== observed config expected:\n%v\n===== observed config actual:\n%v", toYAML(tc.expectedObservedConfig), toYAML(operatorConfigClient.spec.ObservedConfig.Object)) + } + } + + switch { + case tc.expectedCondition != nil && operatorConfigClient.status == nil: + t.Error("missing expected status") + case tc.expectedCondition != nil: + condition := v1helpers.FindOperatorCondition(operatorConfigClient.status.Conditions, condition.ConfigObservationDegradedConditionType) + condition.LastTransitionTime = tc.expectedCondition.LastTransitionTime + if !reflect.DeepEqual(tc.expectedCondition, condition) { + t.Fatalf("\n===== condition expected:\n%v\n===== condition actual:\n%v", toYAML(tc.expectedCondition), toYAML(condition)) + } + default: + if operatorConfigClient.status != nil { + t.Errorf("unexpected %v", spew.Sdump(operatorConfigClient.status)) + } + } + + }) + } +} + +func TestMergoVersion(t *testing.T) { + type test struct{ A string } + src := test{"src"} + dest := test{"dest"} + mergo.Merge(&dest, &src, mergo.WithOverride) + if dest.A != "src" { + t.Errorf("incompatible version of github.com/imdario/mergo found") + } +} + +func toYAML(o interface{}) string { + b, e := yaml.Marshal(o) + if e != nil { + return e.Error() + } + return string(b) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go new file mode 100644 index 00000000000..cd38603d5e1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go @@ -0,0 +1,134 @@ +package featuregates + +import ( + "fmt" + "reflect" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +type FeatureGateLister interface { + FeatureGateLister() configlistersv1.FeatureGateLister +} + +func NewObserveFeatureFlagsFunc(knownFeatures sets.String, configPath []string) configobserver.ObserveConfigFunc { + return (&featureFlags{ + allowAll: len(knownFeatures) == 0, + knownFeatures: knownFeatures, + configPath: configPath, + }).ObserveFeatureFlags +} + +type featureFlags struct { + allowAll bool + knownFeatures sets.String + configPath []string +} + +// ObserveFeatureFlags fills in --feature-flags for the kube-apiserver +func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + listers := genericListers.(FeatureGateLister) + errs := []error{} + prevObservedConfig := map[string]interface{}{} + + currentConfigValue, _, err := unstructured.NestedStringSlice(existingConfig, f.configPath...) 
+	if err != nil {
+		errs = append(errs, err)
+	}
+	if len(currentConfigValue) > 0 {
+		if err := unstructured.SetNestedStringSlice(prevObservedConfig, currentConfigValue, f.configPath...); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	observedConfig := map[string]interface{}{}
+	configResource, err := listers.FeatureGateLister().Get("cluster")
+	// if we have no featuregate, then the installer and MCO probably still have a way to reconcile certain custom resources
+	// we will assume that this means the same as default and hope for the best
+	if apierrors.IsNotFound(err) {
+		configResource = &configv1.FeatureGate{
+			Spec: configv1.FeatureGateSpec{
+				FeatureGateSelection: configv1.FeatureGateSelection{
+					FeatureSet: configv1.Default,
+				},
+			},
+		}
+	} else if err != nil {
+		errs = append(errs, err)
+		return prevObservedConfig, errs
+	}
+
+	newConfigValue, err := f.getWhitelistedFeatureNames(configResource)
+	if err != nil {
+		errs = append(errs, err)
+		return prevObservedConfig, errs
+	}
+	if !reflect.DeepEqual(currentConfigValue, newConfigValue) {
+		recorder.Eventf("ObserveFeatureFlagsUpdated", "Updated %v to %s", strings.Join(f.configPath, "."), strings.Join(newConfigValue, ","))
+	}
+
+	if err := unstructured.SetNestedStringSlice(observedConfig, newConfigValue, f.configPath...); err != nil {
+		recorder.Warningf("ObserveFeatureFlags", "Failed setting %v: %v", strings.Join(f.configPath, "."), err)
+		errs = append(errs, err)
+	}
+
+	return observedConfig, errs
+}
+
+func (f *featureFlags) getWhitelistedFeatureNames(fg *configv1.FeatureGate) ([]string, error) {
+	var err error
+	newConfigValue := []string{}
+	enabledFeatures := []string{}
+	disabledFeatures := []string{}
+	formatEnabledFunc := func(fs string) string {
+		return fmt.Sprintf("%s=true", fs)
+	}
+	formatDisabledFunc := func(fs string) string {
+		return fmt.Sprintf("%s=false", fs)
+	}
+
+	enabledFeatures, disabledFeatures, err = getFeaturesFromTheSpec(fg)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, enable := range enabledFeatures {
+		// only add whitelisted feature flags
+		if !f.allowAll && !f.knownFeatures.Has(enable) {
+			continue
+		}
+		newConfigValue = append(newConfigValue, formatEnabledFunc(enable))
+	}
+	for _, disable := range disabledFeatures {
+		// only add whitelisted feature flags
+		if !f.allowAll && !f.knownFeatures.Has(disable) {
+			continue
+		}
+		newConfigValue = append(newConfigValue, formatDisabledFunc(disable))
+	}
+
+	return newConfigValue, nil
+}
+
+func getFeaturesFromTheSpec(fg *configv1.FeatureGate) ([]string, []string, error) {
+	if fg.Spec.FeatureSet == configv1.CustomNoUpgrade {
+		if fg.Spec.FeatureGateSelection.CustomNoUpgrade != nil {
+			return fg.Spec.FeatureGateSelection.CustomNoUpgrade.Enabled, fg.Spec.FeatureGateSelection.CustomNoUpgrade.Disabled, nil
+		}
+		return []string{}, []string{}, nil
+	}
+
+	featureSet, ok := configv1.FeatureSets[fg.Spec.FeatureSet]
+	if !ok {
+		return []string{}, []string{}, fmt.Errorf(".spec.featureSet %q not found", fg.Spec.FeatureSet)
+	}
+	return featureSet.Enabled, featureSet.Disabled, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go
new file mode 100644
index 00000000000..aebdbd3abd2
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go
@@ -0,0 +1,140 @@
+package featuregates
+
+import (
+	"reflect"
+	
"testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type testLister struct { + lister configlistersv1.FeatureGateLister +} + +func (l testLister) FeatureGateLister() configlistersv1.FeatureGateLister { + return l.lister +} + +func (l testLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + +func (l testLister) PreRunHasSynced() []cache.InformerSynced { + return nil +} + +func TestObserveFeatureFlags(t *testing.T) { + configPath := []string{"foo", "bar"} + + tests := []struct { + name string + + configValue configv1.FeatureSet + expectedResult []string + expectError bool + customNoUpgrade *configv1.CustomFeatureGates + knownFeatures sets.String + }{ + { + name: "default", + configValue: configv1.Default, + expectedResult: []string{ + "ExperimentalCriticalPodAnnotation=true", + "RotateKubeletServerCertificate=true", + "SupportPodPidsLimit=true", + "TLSSecurityProfile=true", + "NodeDisruptionExclusion=true", + "ServiceNodeExclusion=true", + "LegacyNodeRoleBehavior=false", + }, + }, + { + name: "techpreview", + configValue: configv1.TechPreviewNoUpgrade, + expectedResult: []string{ + "ExperimentalCriticalPodAnnotation=true", + "RotateKubeletServerCertificate=true", + "SupportPodPidsLimit=true", + "TLSSecurityProfile=true", + "NodeDisruptionExclusion=true", + "ServiceNodeExclusion=true", + "LegacyNodeRoleBehavior=false", + }, + }, + { + name: "custom no upgrade and all allowed", + configValue: configv1.CustomNoUpgrade, + expectedResult: []string{ + "CustomFeatureEnabled=true", + "CustomFeatureDisabled=false", + }, + customNoUpgrade: &configv1.CustomFeatureGates{ + Enabled: []string{"CustomFeatureEnabled"}, + Disabled: []string{"CustomFeatureDisabled"}, + }, + }, + { + name: "custom no upgrade flag set and none upgrades were provided", + configValue: configv1.CustomNoUpgrade, + expectedResult: []string{}, + }, + { + name: "custom no upgrade and known features", + configValue: configv1.CustomNoUpgrade, + expectedResult: []string{ + "CustomFeatureEnabled=true", + }, + customNoUpgrade: &configv1.CustomFeatureGates{ + Enabled: []string{"CustomFeatureEnabled"}, + Disabled: []string{"CustomFeatureDisabled"}, + }, + knownFeatures: sets.NewString("CustomFeatureEnabled"), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(&configv1.FeatureGate{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: configv1.FeatureGateSpec{ + FeatureGateSelection: configv1.FeatureGateSelection{ + FeatureSet: tc.configValue, + CustomNoUpgrade: tc.customNoUpgrade, + }, + }, + }) + listers := testLister{ + lister: configlistersv1.NewFeatureGateLister(indexer), + } + eventRecorder := events.NewInMemoryRecorder("") + + initialExistingConfig := map[string]interface{}{} + + observeFn := NewObserveFeatureFlagsFunc(tc.knownFeatures, configPath) + + observed, errs := observeFn(listers, eventRecorder, initialExistingConfig) + if len(errs) != 0 && !tc.expectError { + t.Fatal(errs) + } + if len(errs) == 0 && tc.expectError { + t.Fatal("expected an error but got nothing") + } + actual, _, err := 
unstructured.NestedStringSlice(observed, configPath...) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !reflect.DeepEqual(tc.expectedResult, actual) { + t.Errorf("Unexpected features gates\n got: %v\n expected: %v", actual, tc.expectedResult) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS new file mode 100644 index 00000000000..ce2862b8700 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/OWNERS @@ -0,0 +1,10 @@ +reviewers: + - squeed + - dcbw + - danwinship + - knobunc +approvers: + - squeed + - dcbw + - danwinship + - knobunc \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go new file mode 100644 index 00000000000..8d467655835 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go @@ -0,0 +1,129 @@ +package network + +import ( + "fmt" + "net" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "k8s.io/apimachinery/pkg/api/errors" + + "github.com/openshift/library-go/pkg/operator/events" +) + +// GetClusterCIDRs reads the cluster CIDRs from the global network configuration resource. Emits events if CIDRs are not found. +func GetClusterCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("GetClusterCIDRsFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return nil, nil + } + if err != nil { + recorder.Warningf("GetClusterCIDRsFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return nil, err + } + + if len(network.Status.ClusterNetwork) == 0 { + recorder.Warningf("GetClusterCIDRsFailed", "Required status.clusterNetwork field is not set in networks.%s/cluster", configv1.GroupName) + return nil, fmt.Errorf("networks.%s/cluster: status.clusterNetwork not found", configv1.GroupName) + } + + var clusterCIDRs []string + for i, clusterNetwork := range network.Status.ClusterNetwork { + if len(clusterNetwork.CIDR) == 0 { + recorder.Warningf("GetClusterCIDRsFailed", "Required status.clusterNetwork[%d].cidr field is not set in networks.%s/cluster", i, configv1.GroupName) + return nil, fmt.Errorf("networks.%s/cluster: status.clusterNetwork[%d].cidr not found", configv1.GroupName, i) + } + clusterCIDRs = append(clusterCIDRs, clusterNetwork.CIDR) + } + + return clusterCIDRs, nil +} + +// GetServiceCIDR reads the service IP range from the global network configuration resource. Emits events if CIDRs are not found. 
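+// Only the first entry of status.serviceNetwork is returned; any additional entries are ignored.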
+func GetServiceCIDR(lister configlistersv1.NetworkLister, recorder events.Recorder) (string, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("GetServiceCIDRFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return "", nil + } + if err != nil { + recorder.Warningf("GetServiceCIDRFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return "", err + } + + if len(network.Status.ServiceNetwork) == 0 || len(network.Status.ServiceNetwork[0]) == 0 { + recorder.Warningf("GetServiceCIDRFailed", "Required status.serviceNetwork field is not set in networks.%s/cluster", configv1.GroupName) + return "", fmt.Errorf("networks.%s/cluster: status.serviceNetwork not found", configv1.GroupName) + } + + return network.Status.ServiceNetwork[0], nil +} + +// GetExternalIPPolicy retrieves the ExternalIPPolicy for the cluster. +// The policy may be null. +func GetExternalIPPolicy(lister configlistersv1.NetworkLister, recorder events.Recorder) (*configv1.ExternalIPPolicy, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("GetExternalIPPolicyFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return nil, nil + } + if err != nil { + recorder.Warningf("GetExternalIPPolicyFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return nil, err + } + + if network.Spec.ExternalIP == nil { + return nil, nil + } + + pol := network.Spec.ExternalIP.Policy + if pol != nil { + if err := validateCIDRs(pol.AllowedCIDRs); err != nil { + recorder.Warningf("GetExternalIPPolicyFailed", "error parsing networks.%s/cluster Spec.ExternalIP.Policy.AllowedCIDRs: invalid cidr: %v", configv1.GroupName, err) + return nil, err + } + if err := validateCIDRs(pol.RejectedCIDRs); err != nil { + recorder.Warningf("GetExternalIPPolicyFailed", "error parsing networks.%s/cluster Spec.ExternalIP.Policy.RejectedCIDRs: invalid cidr: %v", configv1.GroupName, err) + return nil, err + } + } + + return network.Spec.ExternalIP.Policy, nil +} + +// GetExternalIPAutoAssignCIDRs retrieves the ExternalIPAutoAssignCIDRs, if configured. 
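+// It returns nil without an error when the network resource or spec.externalIP is absent.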
+func GetExternalIPAutoAssignCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("GetExternalIPAutoAssignCIDRsFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return nil, nil + } + if err != nil { + recorder.Warningf("GetExternalIPAutoAssignCIDRsFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return nil, err + } + + if network.Spec.ExternalIP == nil { + return nil, nil + } + + // ensure all ips are valid + if err := validateCIDRs(network.Spec.ExternalIP.AutoAssignCIDRs); err != nil { + recorder.Warningf("GetExternalIPAutoAssignCIDRsFailed", "error parsing networks.%s/cluster Spec.ExternalIP.AutoAssignCIDRs: invalid cidr: %v", configv1.GroupName, err) + return nil, err + } + + return network.Spec.ExternalIP.AutoAssignCIDRs, nil +} + +// validateCIDRs returns an err if any cidr in the list is invalid +func validateCIDRs(in []string) error { + for _, cidr := range in { + _, _, err := net.ParseCIDR(cidr) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_networking_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_networking_test.go new file mode 100644 index 00000000000..a3668270088 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_networking_test.go @@ -0,0 +1,107 @@ +package network + +import ( + "reflect" + "testing" + + "github.com/ghodss/yaml" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + + "github.com/openshift/library-go/pkg/operator/events" +) + +func TestObserveClusterCIDRs(t *testing.T) { + type Test struct { + name string + config *configv1.Network + expected []string + expectedError bool + } + tests := []Test{ + { + "clusterNetworks", + &configv1.Network{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.NetworkStatus{ + ClusterNetwork: []configv1.ClusterNetworkEntry{ + {CIDR: "podCIDR1"}, + {CIDR: "podCIDR2"}, + }, + }, + }, + []string{"podCIDR1", "podCIDR2"}, + false, + }, + { + "none, no old config", + &configv1.Network{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.NetworkStatus{}, + }, + nil, + true, + }, + { + "none, existing config", + &configv1.Network{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.NetworkStatus{}, + }, + nil, + true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if err := indexer.Add(test.config); err != nil { + t.Fatal(err.Error()) + } + result, err := GetClusterCIDRs(configlistersv1.NewNetworkLister(indexer), events.NewInMemoryRecorder("network")) + if err != nil && !test.expectedError { + t.Fatal(err) + } else if err == nil { + if test.expectedError { + t.Fatalf("expected error, but got none") + } + if !reflect.DeepEqual(test.expected, result) { + t.Errorf("\n===== observed config expected:\n%v\n===== observed config actual:\n%v", toYAML(test.expected), toYAML(result)) + } + } + }) + } +} + +func TestObserveServiceClusterIPRanges(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + if err := 
indexer.Add(&configv1.Network{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Status: configv1.NetworkStatus{ + ServiceNetwork: []string{"serviceCIDR"}, + }, + }, + ); err != nil { + t.Fatal(err.Error()) + } + result, err := GetServiceCIDR(configlistersv1.NewNetworkLister(indexer), events.NewInMemoryRecorder("network")) + if err != nil { + t.Fatal(err) + } + + if expected := "serviceCIDR"; !reflect.DeepEqual(expected, result) { + t.Errorf("\n===== observed config expected:\n%v\n===== observed config actual:\n%v", toYAML(expected), toYAML(result)) + } +} + +func toYAML(o interface{}) string { + b, e := yaml.Marshal(o) + if e != nil { + return e.Error() + } + return string(b) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go new file mode 100644 index 00000000000..4231de35838 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go @@ -0,0 +1,92 @@ +package proxy + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +type ProxyLister interface { + ProxyLister() configlistersv1.ProxyLister +} + +func NewProxyObserveFunc(configPath []string) configobserver.ObserveConfigFunc { + return (&observeProxyFlags{ + configPath: configPath, + }).ObserveProxyConfig +} + +type observeProxyFlags struct { + configPath []string +} + +// ObserveProxyConfig observes the proxy.config.openshift.io/cluster object and writes +// its content to an unstructured object in a string map at the path from the constructor +func (f *observeProxyFlags) ObserveProxyConfig(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + proxyLister := genericListers.(ProxyLister) + + errs := []error{} + prevObservedProxyConfig := map[string]interface{}{} + + // grab the current Proxy config to later check whether it was updated + currentProxyMap, _, err := unstructured.NestedStringMap(existingConfig, f.configPath...) + if err != nil { + return prevObservedProxyConfig, append(errs, err) + } + + if len(currentProxyMap) > 0 { + unstructured.SetNestedStringMap(prevObservedProxyConfig, currentProxyMap, f.configPath...) 
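+		// the returned error can safely be dropped: prevObservedProxyConfig is a freshly
+		// created map, so setting a nested value at this path cannot fail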
+ } + + observedConfig := map[string]interface{}{} + proxyConfig, err := proxyLister.ProxyLister().Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveProxyConfig", "proxy.%s/cluster not found", configv1.GroupName) + return observedConfig, errs + } + if err != nil { + errs = append(errs, err) + return existingConfig, errs + } + + newProxyMap := proxyToMap(proxyConfig) + if newProxyMap != nil { + if err := unstructured.SetNestedStringMap(observedConfig, newProxyMap, f.configPath...); err != nil { + errs = append(errs, err) + } + } + + if !reflect.DeepEqual(currentProxyMap, newProxyMap) { + recorder.Eventf("ObserveProxyConfig", "proxy changed to %q", newProxyMap) + } + + return observedConfig, errs +} + +func proxyToMap(proxy *configv1.Proxy) map[string]string { + proxyMap := map[string]string{} + + if noProxy := proxy.Status.NoProxy; len(noProxy) > 0 { + proxyMap["NO_PROXY"] = noProxy + } + + if httpProxy := proxy.Status.HTTPProxy; len(httpProxy) > 0 { + proxyMap["HTTP_PROXY"] = httpProxy + } + + if httpsProxy := proxy.Status.HTTPSProxy; len(httpsProxy) > 0 { + proxyMap["HTTPS_PROXY"] = httpsProxy + } + + if len(proxyMap) == 0 { + return nil + } + + return proxyMap +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go new file mode 100644 index 00000000000..ef5a7e30217 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go @@ -0,0 +1,105 @@ +package proxy + +import ( + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type testLister struct { + lister configlistersv1.ProxyLister +} + +func (l testLister) ProxyLister() configlistersv1.ProxyLister { + return l.lister +} + +func (l testLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + +func (l testLister) PreRunHasSynced() []cache.InformerSynced { + return nil +} + +func TestObserveProxyConfig(t *testing.T) { + configPath := []string{"openshift", "proxy"} + + tests := []struct { + name string + proxySpec configv1.ProxySpec + proxyStatus configv1.ProxyStatus + previous map[string]string + expected map[string]interface{} + expectedError []error + eventsExpected int + }{ + { + name: "all unset", + proxySpec: configv1.ProxySpec{}, + proxyStatus: configv1.ProxyStatus{}, + expected: map[string]interface{}{}, + expectedError: []error{}, + }, + { + name: "all set", + proxySpec: configv1.ProxySpec{ + HTTPProxy: "http://someplace.it", + HTTPSProxy: "https://someplace.it", + NoProxy: "127.0.0.1", + }, + proxyStatus: configv1.ProxyStatus{ + HTTPProxy: "http://someplace.it", + HTTPSProxy: "https://someplace.it", + NoProxy: "127.0.0.1,incluster.address.it", + }, + expected: map[string]interface{}{ + "openshift": map[string]interface{}{ + "proxy": map[string]interface{}{ + "HTTP_PROXY": "http://someplace.it", + "HTTPS_PROXY": "https://someplace.it", + "NO_PROXY": "127.0.0.1,incluster.address.it", + }, + }, + }, + expectedError: []error{}, + eventsExpected: 1, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + indexer := 
cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(&configv1.Proxy{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: tt.proxySpec, + Status: tt.proxyStatus, + }) + listers := testLister{ + lister: configlistersv1.NewProxyLister(indexer), + } + eventRecorder := events.NewInMemoryRecorder("") + + initialExistingConfig := map[string]interface{}{} + + observeFn := NewProxyObserveFunc(configPath) + + got, errorsGot := observeFn(listers, eventRecorder, initialExistingConfig) + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("observeProxyFlags.ObserveProxyConfig() got = %v, want %v", got, tt.expected) + } + if !reflect.DeepEqual(errorsGot, tt.expectedError) { + t.Errorf("observeProxyFlags.ObserveProxyConfig() errorsGot = %v, want %v", errorsGot, tt.expectedError) + } + if events := eventRecorder.Events(); len(events) != tt.eventsExpected { + t.Errorf("expected %d events, but got %d: %v", tt.eventsExpected, len(events), events) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers.go new file mode 100644 index 00000000000..8ba297c5afd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers.go @@ -0,0 +1,110 @@ +package encryption + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators" + + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1" + + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" + + "github.com/openshift/library-go/pkg/operator/encryption/controllers" + "github.com/openshift/library-go/pkg/operator/encryption/secrets" + "github.com/openshift/library-go/pkg/operator/encryption/statemachine" +) + +type runner interface { + Run(stopCh <-chan struct{}) +} + +func NewControllers( + component string, + deployer statemachine.Deployer, + migrator migrators.Migrator, + operatorClient operatorv1helpers.OperatorClient, + apiServerClient configv1client.APIServerInterface, + apiServerInformer configv1informers.APIServerInformer, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + secretsClient corev1.SecretsGetter, + eventRecorder events.Recorder, + encryptedGRs ...schema.GroupResource, +) (*Controllers, error) { + // avoid using the CachedSecretGetter as we need strong guarantees that our encryptionSecretSelector works + // otherwise we could see secrets from a different component (which will break our keyID invariants) + // this is fine in terms of performance since these controllers will be idle most of the time + // TODO: update the eventHandlers used by the controllers to ignore components that do not match their own + encryptionSecretSelector := metav1.ListOptions{LabelSelector: secrets.EncryptionKeySecretsLabel + "=" + component} + + return &Controllers{ + controllers: []runner{ + controllers.NewKeyController( + component, + deployer, + operatorClient, + apiServerClient, + apiServerInformer, + kubeInformersForNamespaces, + secretsClient, + encryptionSecretSelector, + eventRecorder, + encryptedGRs, + ), + controllers.NewStateController( + component, + deployer, + 
operatorClient, + kubeInformersForNamespaces, + secretsClient, + encryptionSecretSelector, + eventRecorder, + encryptedGRs, + ), + controllers.NewPruneController( + deployer, + operatorClient, + kubeInformersForNamespaces, + secretsClient, + encryptionSecretSelector, + eventRecorder, + encryptedGRs, + ), + controllers.NewMigrationController( + component, + deployer, + migrator, + operatorClient, + kubeInformersForNamespaces, + secretsClient, + encryptionSecretSelector, + eventRecorder, + encryptedGRs, + ), + controllers.NewConditionController( + deployer, + operatorClient, + kubeInformersForNamespaces, + secretsClient, + encryptionSecretSelector, + eventRecorder, + encryptedGRs, + ), + }, + }, nil +} + +type Controllers struct { + controllers []runner +} + +func (c *Controllers) Run(stopCh <-chan struct{}) { + for _, controller := range c.controllers { + con := controller // capture range variable + go con.Run(stopCh) + } + <-stopCh +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/condition_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/condition_controller.go new file mode 100644 index 00000000000..f41b2777ea2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/condition_controller.go @@ -0,0 +1,242 @@ +package controllers + +import ( + "fmt" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig" + "github.com/openshift/library-go/pkg/operator/encryption/state" + "github.com/openshift/library-go/pkg/operator/encryption/statemachine" + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + conditionWorkKey = "key" +) + +// conditionController maintains the Encrypted condition. It sets it to true iff there is a +// fully migrated read-key in the current config, and no later key is of identity type. 
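+//
+// For illustration (the values below are hypothetical), a fully encrypted
+// cluster surfaces a condition like:
+//
+//   - type: Encrypted
+//     status: "True"
+//     reason: EncryptionCompleted
+//     message: 'All resources encrypted: secrets, configmaps'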
+type conditionController struct { + operatorClient operatorv1helpers.OperatorClient + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder + + preRunCachesSynced []cache.InformerSynced + + encryptedGRs []schema.GroupResource + + encryptionSecretSelector metav1.ListOptions + + deployer statemachine.Deployer + secretClient corev1client.SecretsGetter +} + +func NewConditionController( + deployer statemachine.Deployer, + operatorClient operatorv1helpers.OperatorClient, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + secretClient corev1client.SecretsGetter, + encryptionSecretSelector metav1.ListOptions, + eventRecorder events.Recorder, + encryptedGRs []schema.GroupResource, +) *conditionController { + c := &conditionController{ + operatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "EncryptionConditionController"), + eventRecorder: eventRecorder.WithComponentSuffix("encryption-condition-controller"), + + encryptedGRs: encryptedGRs, + + encryptionSecretSelector: encryptionSecretSelector, + deployer: deployer, + secretClient: secretClient, + } + + c.preRunCachesSynced = setUpInformers(deployer, operatorClient, kubeInformersForNamespaces, c.eventHandler()) + + return c +} + +func (c *conditionController) sync() error { + if ready, err := shouldRunEncryptionController(c.operatorClient); err != nil || !ready { + return err // we will get re-kicked when the operator status updates + } + + currentConfig, desiredState, foundSecrets, transitioningReason, err := statemachine.GetEncryptionConfigAndState(c.deployer, c.secretClient, c.encryptionSecretSelector, c.encryptedGRs) + if err != nil || len(transitioningReason) > 0 { + return err + } + + cond := operatorv1.OperatorCondition{ + Type: "Encrypted", + Status: operatorv1.ConditionTrue, + Reason: "EncryptionCompleted", + Message: fmt.Sprintf("All resources encrypted: %s", grString(c.encryptedGRs)), + } + currentState, _ := encryptionconfig.ToEncryptionState(currentConfig, foundSecrets) + + if len(foundSecrets) == 0 { + cond.Status = operatorv1.ConditionFalse + cond.Reason = "EncryptionDisabled" + cond.Message = "Encryption is not enabled" + } else { + // check for identity key in desired state first. This will make us catch upcoming decryption early before + // it settles into the current config. 
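+		// (An Identity-mode write key in desiredState means decryption has been
+		// requested via the API but may not have reached the current config yet.)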
+	for _, s := range desiredState {
+		if s.WriteKey.Mode != state.Identity {
+			continue
+		}
+
+		if allMigrated(c.encryptedGRs, s.WriteKey.Migrated.Resources) {
+			cond.Status = operatorv1.ConditionFalse
+			cond.Reason = "DecryptionCompleted"
+			cond.Message = "Encryption mode set to identity and everything is decrypted"
+		} else {
+			cond.Status = operatorv1.ConditionFalse
+			cond.Reason = "DecryptionInProgress"
+			cond.Message = "Encryption mode set to identity and decryption is not finished"
+		}
+		break
+	}
+	}
+	if cond.Status == operatorv1.ConditionTrue {
+		// now that the desired state looks like it won't lead to identity as write-key, test the current state
+	NextResource:
+		for _, gr := range c.encryptedGRs {
+			s, ok := currentState[gr]
+			if !ok {
+				cond.Status = operatorv1.ConditionFalse
+				cond.Reason = "EncryptionInProgress"
+				cond.Message = fmt.Sprintf("Resource %s is not encrypted", gr.String())
+				break NextResource
+			}
+
+			if s.WriteKey.Mode == state.Identity {
+				if allMigrated(c.encryptedGRs, s.WriteKey.Migrated.Resources) {
+					cond.Status = operatorv1.ConditionFalse
+					cond.Reason = "DecryptionCompleted"
+					cond.Message = "Encryption mode set to identity and everything is decrypted"
+				} else {
+					cond.Status = operatorv1.ConditionFalse
+					cond.Reason = "DecryptionInProgress"
+					cond.Message = "Encryption mode set to identity and decryption is not finished"
+				}
+				break
+			}
+
+			// go through read keys until we find a completely migrated one. Finding an identity mode before
+			// means migration is ongoing.
+			for _, rk := range s.ReadKeys {
+				if rk.Mode == state.Identity {
+					cond.Status = operatorv1.ConditionFalse
+					cond.Reason = "EncryptionInProgress"
+					cond.Message = "Encryption is ongoing"
+					break NextResource
+				}
+				if migratedSet(rk.Migrated.Resources).Has(gr.String()) {
+					continue NextResource
+				}
+			}
+
+			cond.Status = operatorv1.ConditionFalse
+			cond.Reason = "EncryptionInProgress"
+			cond.Message = fmt.Sprintf("Resource %s is being encrypted", gr.String())
+			break
+		}
+	}
+
+	// update Encrypted condition
+	_, _, updateError := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(cond))
+	return updateError
+}
+
+func allMigrated(toBeEncrypted, migrated []schema.GroupResource) bool {
+	s := migratedSet(migrated)
+	for _, gr := range toBeEncrypted {
+		if !s.Has(gr.String()) {
+			return false
+		}
+	}
+	return true
+}
+
+func migratedSet(grs []schema.GroupResource) sets.String {
+	migrated := sets.NewString()
+	for _, gr := range grs {
+		migrated.Insert(gr.String())
+	}
+	return migrated
+}
+
+func (c *conditionController) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting EncryptionConditionController")
+	defer klog.Infof("Shutting down EncryptionConditionController")
+	if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...)
{ + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // only start one worker + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *conditionController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *conditionController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *conditionController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(conditionWorkKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(conditionWorkKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(conditionWorkKey) }, + } +} + +func grString(grs []schema.GroupResource) string { + ss := make([]string, 0, len(grs)) + for _, gr := range grs { + ss = append(ss, gr.String()) + } + return strings.Join(ss, ", ") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/controller.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/controller.go new file mode 100644 index 00000000000..165f1e4212f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/controller.go @@ -0,0 +1,36 @@ +package controllers + +import ( + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/encryption/statemachine" + "github.com/openshift/library-go/pkg/operator/management" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func shouldRunEncryptionController(operatorClient operatorv1helpers.OperatorClient) (bool, error) { + operatorSpec, _, _, err := operatorClient.GetOperatorState() + if err != nil { + return false, err + } + + return management.IsOperatorManaged(operatorSpec.ManagementState), nil +} + +func setUpInformers( + deployer statemachine.Deployer, + operatorClient operatorv1helpers.OperatorClient, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + eventHandler cache.ResourceEventHandler, +) []cache.InformerSynced { + operatorInformer := operatorClient.Informer() + operatorInformer.AddEventHandler(eventHandler) + + managedSecretsInformer := kubeInformersForNamespaces.InformersFor("openshift-config-managed").Core().V1().Secrets().Informer() + managedSecretsInformer.AddEventHandler(eventHandler) + + return append([]cache.InformerSynced{ + operatorInformer.HasSynced, + managedSecretsInformer.HasSynced, + }, deployer.AddEventHandler(eventHandler)...) 
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/helpers_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/helpers_test.go
new file mode 100644
index 00000000000..a814cb57f24
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/helpers_test.go
@@ -0,0 +1,21 @@
+package controllers
+
+import (
+	"fmt"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig"
+)
+
+func createEncryptionCfgSecret(t *testing.T, targetNs string, revision string, encryptionCfg *apiserverconfigv1.EncryptionConfiguration) *corev1.Secret {
+	t.Helper()
+
+	s, err := encryptionconfig.ToSecret(targetNs, fmt.Sprintf("%s-%s", "encryption-config", revision), encryptionCfg)
+	if err != nil {
+		t.Fatal(err)
+	}
+	return s
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/key_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/key_controller.go
new file mode 100644
index 00000000000..9a130da4964
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/key_controller.go
@@ -0,0 +1,395 @@
+package controllers
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strings"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	kyaml "k8s.io/apimachinery/pkg/util/yaml"
+	apiserverv1 "k8s.io/apiserver/pkg/apis/config/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
+	"k8s.io/utils/pointer"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
+	configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/crypto"
+	"github.com/openshift/library-go/pkg/operator/encryption/secrets"
+	"github.com/openshift/library-go/pkg/operator/encryption/state"
+	"github.com/openshift/library-go/pkg/operator/encryption/statemachine"
+	"github.com/openshift/library-go/pkg/operator/events"
+	operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+const encWorkKey = "key"
+
+// encryptionSecretMigrationInterval determines how much time must pass after a key has been observed as
+// migrated before a new key is created by the key minting controller. The new key's ID will be one
+// greater than the last key's ID (the first key has a key ID of 1).
+const encryptionSecretMigrationInterval = time.Hour * 24 * 7 // one week
+
+// keyController creates new keys if necessary. It
+// * watches
+// - secrets in openshift-config-managed
+// - pods in target namespace
+// - secrets in target namespace
+// * computes a new, desired encryption config from encryption-config-<revision>
+// and the existing keys in openshift-config-managed.
+// * derives from the desired encryption config whether a new key is needed due to +// - encryption is being enabled via the API or +// - a new to-be-encrypted resource shows up or +// - the EncryptionType in the API does not match with the newest existing key or +// - based on time (once a week is the proposed rotation interval) or +// - an external reason given as a string in .encryption.reason of UnsupportedConfigOverrides. +// It then creates it. +// +// Note: the "based on time" reason for a new key is based on the annotation +// encryption.apiserver.operator.openshift.io/migrated-timestamp instead of +// the key secret's creationTimestamp because the clock is supposed to +// start when a migration has been finished, not when it begins. +type keyController struct { + operatorClient operatorv1helpers.OperatorClient + apiServerClient configv1client.APIServerInterface + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder + + preRunCachesSynced []cache.InformerSynced + + encryptedGRs []schema.GroupResource + + component string + encryptionSecretSelector metav1.ListOptions + + deployer statemachine.Deployer + secretClient corev1client.SecretsGetter +} + +func NewKeyController( + component string, + deployer statemachine.Deployer, + operatorClient operatorv1helpers.OperatorClient, + apiServerClient configv1client.APIServerInterface, + apiServerInformer configv1informers.APIServerInformer, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + secretClient corev1client.SecretsGetter, + encryptionSecretSelector metav1.ListOptions, + eventRecorder events.Recorder, + encryptedGRs []schema.GroupResource, +) *keyController { + c := &keyController{ + operatorClient: operatorClient, + apiServerClient: apiServerClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "EncryptionKeyController"), + eventRecorder: eventRecorder.WithComponentSuffix("encryption-key-controller"), + + encryptedGRs: encryptedGRs, + component: component, + + encryptionSecretSelector: encryptionSecretSelector, + deployer: deployer, + secretClient: secretClient, + } + + c.preRunCachesSynced = setUpInformers(deployer, operatorClient, kubeInformersForNamespaces, c.eventHandler()) + + apiServerInformer.Informer().AddEventHandler(c.eventHandler()) + c.preRunCachesSynced = append(c.preRunCachesSynced, apiServerInformer.Informer().HasSynced) + + return c +} + +func (c *keyController) sync() error { + if ready, err := shouldRunEncryptionController(c.operatorClient); err != nil || !ready { + return err // we will get re-kicked when the operator status updates + } + + configError := c.checkAndCreateKeys() + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: "EncryptionKeyControllerDegraded", + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +func (c *keyController) checkAndCreateKeys() error { + currentMode, externalReason, err := c.getCurrentModeAndExternalReason() + if err != nil { + return err + } + + currentConfig, desiredEncryptionState, secrets, isProgressingReason, err := statemachine.GetEncryptionConfigAndState(c.deployer, c.secretClient, c.encryptionSecretSelector, c.encryptedGRs) + if err != nil { + return 
err
+	}
+	if len(isProgressingReason) > 0 {
+		c.queue.AddAfter(encWorkKey, 2*time.Minute)
+		return nil
+	}
+
+	// avoid unintended start of encryption
+	hasBeenOnBefore := currentConfig != nil || len(secrets) > 0
+	if currentMode == state.Identity && !hasBeenOnBefore {
+		return nil
+	}
+
+	var (
+		newKeyRequired bool
+		newKeyID       uint64
+		reasons        []string
+	)
+
+	// note here that desiredEncryptionState is never empty because getDesiredEncryptionState
+	// fills up the state with all resources and sets the identity write key if write key secrets
+	// are missing.
+
+	var commonReason *string
+	for gr, grKeys := range desiredEncryptionState {
+		latestKeyID, internalReason, needed := needsNewKey(grKeys, currentMode, externalReason, c.encryptedGRs)
+		if !needed {
+			continue
+		}
+
+		if commonReason == nil {
+			commonReason = &internalReason
+		} else if *commonReason != internalReason {
+			commonReason = pointer.StringPtr("") // this means we have no common reason
+		}
+
+		newKeyRequired = true
+		nextKeyID := latestKeyID + 1
+		if newKeyID < nextKeyID {
+			newKeyID = nextKeyID
+		}
+		reasons = append(reasons, fmt.Sprintf("%s-%s", gr.Resource, internalReason))
+	}
+	if !newKeyRequired {
+		return nil
+	}
+	if commonReason != nil && len(*commonReason) > 0 && len(reasons) > 1 {
+		reasons = []string{*commonReason} // don't repeat reasons
+	}
+
+	sort.Strings(reasons)
+	internalReason := strings.Join(reasons, ", ")
+	keySecret, err := c.generateKeySecret(newKeyID, currentMode, internalReason, externalReason)
+	if err != nil {
+		return fmt.Errorf("failed to create key: %v", err)
+	}
+	_, createErr := c.secretClient.Secrets("openshift-config-managed").Create(keySecret)
+	if errors.IsAlreadyExists(createErr) {
+		return c.validateExistingSecret(keySecret, newKeyID)
+	}
+	if createErr != nil {
+		c.eventRecorder.Warningf("EncryptionKeyCreateFailed", "Secret %q failed to create: %v", keySecret.Name, createErr)
+		return createErr
+	}
+
+	c.eventRecorder.Eventf("EncryptionKeyCreated", "Secret %q successfully created: %q", keySecret.Name, reasons)
+
+	return nil
+}
+
+func (c *keyController) validateExistingSecret(keySecret *corev1.Secret, keyID uint64) error {
+	actualKeySecret, err := c.secretClient.Secrets("openshift-config-managed").Get(keySecret.Name, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+
+	actualKeyID, ok := state.NameToKeyID(actualKeySecret.Name)
+	if !ok || actualKeyID != keyID {
+		// TODO we can just get stuck in degraded here ...
+ return fmt.Errorf("secret %s has an invalid name, new keys cannot be created for encryption target", keySecret.Name) + } + + if _, err := secrets.ToKeyState(actualKeySecret); err != nil { + return fmt.Errorf("secret %s is invalid, new keys cannot be created for encryption target", keySecret.Name) + } + + return nil // we made this key earlier +} + +func (c *keyController) generateKeySecret(keyID uint64, currentMode state.Mode, internalReason, externalReason string) (*corev1.Secret, error) { + bs := crypto.ModeToNewKeyFunc[currentMode]() + ks := state.KeyState{ + Key: apiserverv1.Key{ + Name: fmt.Sprintf("%d", keyID), + Secret: base64.StdEncoding.EncodeToString(bs), + }, + Mode: currentMode, + InternalReason: internalReason, + ExternalReason: externalReason, + } + return secrets.FromKeyState(c.component, ks) +} + +func (c *keyController) getCurrentModeAndExternalReason() (state.Mode, string, error) { + apiServer, err := c.apiServerClient.Get("cluster", metav1.GetOptions{}) + if err != nil { + return "", "", err + } + + operatorSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return "", "", err + } + + // TODO make this un-settable once set + // ex: we could require the tech preview no upgrade flag to be set before we will honor this field + type unsupportedEncryptionConfig struct { + Encryption struct { + Reason string `json:"reason"` + } `json:"encryption"` + } + encryptionConfig := &unsupportedEncryptionConfig{} + if raw := operatorSpec.UnsupportedConfigOverrides.Raw; len(raw) > 0 { + jsonRaw, err := kyaml.ToJSON(raw) + if err != nil { + klog.Warning(err) + // maybe it's just json + jsonRaw = raw + } + if err := json.Unmarshal(jsonRaw, encryptionConfig); err != nil { + return "", "", err + } + } + + reason := encryptionConfig.Encryption.Reason + switch currentMode := state.Mode(apiServer.Spec.Encryption.Type); currentMode { + case state.AESCBC, state.Identity: // secretbox is disabled for now + return currentMode, reason, nil + case "": // unspecified means use the default (which can change over time) + return state.DefaultMode, reason, nil + default: + return "", "", fmt.Errorf("unknown encryption mode configured: %s", currentMode) + } +} + +// needsNewKey checks whether a new key must be created for the given resource. If true, it also returns the latest +// used key ID and a reason string. +func needsNewKey(grKeys state.GroupResourceState, currentMode state.Mode, externalReason string, encryptedGRs []schema.GroupResource) (uint64, string, bool) { + // we always need to have some encryption keys unless we are turned off + if len(grKeys.ReadKeys) == 0 { + return 0, "key-does-not-exist", currentMode != state.Identity + } + + latestKey := grKeys.ReadKeys[0] + latestKeyID, ok := state.NameToKeyID(latestKey.Key.Name) + if !ok { + return latestKeyID, fmt.Sprintf("key-secret-%d-is-invalid", latestKeyID), true + } + + // if latest secret has been deleted, we will never be able to migrate to that key. 
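+	// ("Backed" is false when a key is referenced by an observed encryption config
+	// but its backing secret in openshift-config-managed is gone.)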
+	if !latestKey.Backed {
+		return latestKeyID, fmt.Sprintf("encryption-config-key-%d-not-backed-by-secret", latestKeyID), true
+	}
+
+	// check that we have pruned read-keys: the write-keys, plus at most one more backed read-key (potentially some unbacked ones before)
+	backedKeys := 0
+	for _, rk := range grKeys.ReadKeys {
+		if rk.Backed {
+			backedKeys++
+		}
+	}
+	if backedKeys > 2 {
+		return 0, "", false
+	}
+
+	// we have not migrated the latest key, do nothing until that is complete
+	if allMigrated, _, _ := state.MigratedFor(encryptedGRs, latestKey); !allMigrated {
+		return 0, "", false
+	}
+
+	// if the most recent secret was encrypted in a mode different than the current mode, we need to generate a new key
+	if latestKey.Mode != currentMode {
+		return latestKeyID, "encryption-mode-changed", true
+	}
+
+	// if the most recent secret turned off encryption and we want to keep it that way, do nothing
+	if latestKey.Mode == state.Identity && currentMode == state.Identity {
+		return 0, "", false
+	}
+
+	// if the most recent secret has a different external reason than the current reason, we need to generate a new key
+	if latestKey.ExternalReason != externalReason && len(externalReason) != 0 {
+		return latestKeyID, "external-reason-changed", true
+	}
+
+	// we check for encryptionSecretMigratedTimestamp set by migration controller to determine when migration completed
+	// this also generates back pressure for key rotation when migration takes a long time or was recently completed
+	return latestKeyID, "rotation-interval-has-passed", time.Since(latestKey.Migrated.Timestamp) > encryptionSecretMigrationInterval
+}
+
+func (c *keyController) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting EncryptionKeyController")
+	defer klog.Infof("Shutting down EncryptionKeyController")
+	if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...)
{ + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // only start one worker + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *keyController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *keyController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *keyController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(encWorkKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(encWorkKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(encWorkKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/key_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/key_controller_test.go new file mode 100644 index 00000000000..779307285d0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/key_controller_test.go @@ -0,0 +1,369 @@ +package controllers + +import ( + "encoding/base64" + "errors" + "fmt" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + configv1clientfake "github.com/openshift/client-go/config/clientset/versioned/fake" + configv1informers "github.com/openshift/client-go/config/informers/externalversions" + + encryptiondeployer "github.com/openshift/library-go/pkg/operator/encryption/deployer" + encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func TestKeyController(t *testing.T) { + apiServerAesCBC := []runtime.Object{&configv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: configv1.APIServerSpec{ + Encryption: configv1.APIServerEncryption{ + Type: "aescbc", + }, + }, + }} + + scenarios := []struct { + name string + initialObjects []runtime.Object + apiServerObjects []runtime.Object + encryptionSecretSelector metav1.ListOptions + targetNamespace string + targetGRs []schema.GroupResource + // expectedActions holds actions to be verified in the form of "verb:resource:namespace" + expectedActions []string + validateFunc func(ts *testing.T, actions []clientgotesting.Action, targetNamespace string, targetGRs []schema.GroupResource) + validateOperatorClientFunc func(ts *testing.T, operatorClient v1helpers.OperatorClient) + expectedError error + }{ + { + name: "no apiservers config", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + targetNamespace: "kms", + initialObjects: []runtime.Object{}, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, targetNamespace string, targetGRs []schema.GroupResource) { + }, + expectedError: 
fmt.Errorf(`apiservers.config.openshift.io "cluster" not found`), + expectedActions: []string{}, + }, + + { + name: "no pod", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + targetNamespace: "kms", + initialObjects: []runtime.Object{}, + apiServerObjects: []runtime.Object{&configv1.APIServer{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}}, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, targetNamespace string, targetGRs []schema.GroupResource) { + }, + expectedActions: []string{"list:pods:kms"}, + }, + + { + name: "encryption disabled", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + targetNamespace: "kms", + initialObjects: []runtime.Object{encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1")}, + apiServerObjects: []runtime.Object{&configv1.APIServer{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}}}, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, targetNamespace string, targetGRs []schema.GroupResource) { + }, + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed"}, + }, + + // Assumes a clean slate, that is, there are no previous resources in the system. + // It expects that a secret resource with an appropriate key, name and labels will be created. + { + name: "checks if a secret with AES256 key for core/secret is created", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "create:secrets:openshift-config-managed", "create:events:kms"}, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + apiServerObjects: []runtime.Object{&configv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: configv1.APIServerSpec{ + Encryption: configv1.APIServerEncryption{ + Type: "aescbc", + }, + }, + }}, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, targetNamespace string, targetGRs []schema.GroupResource) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("create", "secrets") { + createAction := action.(clientgotesting.CreateAction) + actualSecret := createAction.GetObject().(*corev1.Secret) + expectedSecret := encryptiontesting.CreateEncryptionKeySecretWithKeyFromExistingSecret(targetNamespace, []schema.GroupResource{}, 1, actualSecret) + expectedSecret.Annotations["encryption.apiserver.operator.openshift.io/internal-reason"] = "secrets-key-does-not-exist" // TODO: Fix this + if !equality.Semantic.DeepEqual(actualSecret, expectedSecret) { + ts.Errorf(diff.ObjectDiff(expectedSecret, actualSecret)) + } + if err := encryptiontesting.ValidateEncryptionKey(actualSecret); err != nil { + ts.Error(err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't created and validated") + } + }, + }, + + { + name: "no-op when a valid write key exists, but is not migrated", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 7, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", 
"get:secrets:kms", "list:secrets:openshift-config-managed"}, + }, + + { + name: "no-op when a valid write key exists, is migrated, but not expired", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 7, []byte("61def964fb967f5d7c44a2af8dab6865"), time.Now()), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed"}, + }, + + { + name: "creates a new write key because previous one is migrated, but has no migration timestamp", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 7, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "create:secrets:openshift-config-managed", "create:events:kms"}, + }, + + { + name: "creates a new write key because the previous one expired", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 5, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "create:secrets:openshift-config-managed", "create:events:kms"}, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, targetNamespace string, targetGRs []schema.GroupResource) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("create", "secrets") { + createAction := action.(clientgotesting.CreateAction) + actualSecret := createAction.GetObject().(*corev1.Secret) + expectedSecret := encryptiontesting.CreateEncryptionKeySecretWithKeyFromExistingSecret(targetNamespace, []schema.GroupResource{}, 6, actualSecret) + expectedSecret.Annotations["encryption.apiserver.operator.openshift.io/internal-reason"] = "secrets-rotation-interval-has-passed" + if !equality.Semantic.DeepEqual(actualSecret, expectedSecret) { + ts.Errorf(diff.ObjectDiff(expectedSecret, actualSecret)) + } + if err := encryptiontesting.ValidateEncryptionKey(actualSecret); err != nil { + ts.Error(err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't created and validated") + } + }, + }, + + { + name: "create a new write key when the previous key expired and another read key exists", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 6, 
[]byte("61def964fb967f5d7c44a2af8dab6865")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 5, []byte("51def964fb967f5d7c44a2af8dab6865")), + func() *corev1.Secret { + keysResForSecrets := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "6", + Secret: base64.StdEncoding.EncodeToString([]byte("61def964fb967f5d7c44a2af8dab6865")), + }, + { + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("51def964fb967f5d7c44a2af8dab6865")), + }, + }, + } + + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysResForSecrets}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + ecs.APIVersion = corev1.SchemeGroupVersion.String() + + return ecs + }(), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "create:secrets:openshift-config-managed", + "create:events:kms", + }, + }, + + { + name: "no-op when the previous key was migrated and the current one is valid but hasn't been observed", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 5, []byte("61def964fb967f5d7c44a2af8dab6865")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 6, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed"}, + }, + + { + name: "degraded a secret with invalid key exists", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialObjects: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("")), + }, + apiServerObjects: apiServerAesCBC, + targetNamespace: "kms", + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "create:secrets:openshift-config-managed", "get:secrets:openshift-config-managed"}, + validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) { + expectedCondition := operatorv1.OperatorCondition{ + Type: "EncryptionKeyControllerDegraded", + Status: "True", + Reason: "Error", + Message: "secret encryption-key-kms-1 is invalid, new keys cannot be created for encryption target", + } + encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, []operatorv1.OperatorCondition{expectedCondition}) + }, + expectedError: errors.New("secret encryption-key-kms-1 is invalid, new keys cannot be created for encryption target"), + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // setup + fakeOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + OperatorStatus: operatorv1.OperatorStatus{ + // we need to set up proper conditions before the test starts because + // the controller calls UpdateStatus which calls UpdateOperatorStatus method 
which is unsupported (fake client) and throws an exception + Conditions: []operatorv1.OperatorCondition{ + { + Type: "EncryptionKeyControllerDegraded", + Status: "False", + }, + }, + }, + NodeStatuses: []operatorv1.NodeStatus{ + {NodeName: "node-1"}, + }, + }, + nil, + nil, + ) + + fakeKubeClient := fake.NewSimpleClientset(scenario.initialObjects...) + eventRecorder := events.NewRecorder(fakeKubeClient.CoreV1().Events(scenario.targetNamespace), "test-encryptionKeyController", &corev1.ObjectReference{}) + // pass informer for + // - target namespace: pods and secrets + // - openshift-config-managed: secrets + // note that the informer factory is not used in the test - it's only needed to create the controller + kubeInformers := v1helpers.NewKubeInformersForNamespaces(fakeKubeClient, "openshift-config-managed", scenario.targetNamespace) + fakeSecretClient := fakeKubeClient.CoreV1() + fakePodClient := fakeKubeClient.CoreV1() + fakeConfigClient := configv1clientfake.NewSimpleClientset(scenario.apiServerObjects...) + fakeApiServerClient := fakeConfigClient.ConfigV1().APIServers() + fakeApiServerInformer := configv1informers.NewSharedInformerFactory(fakeConfigClient, time.Minute).Config().V1().APIServers() + + deployer, err := encryptiondeployer.NewRevisionLabelPodDeployer("revision", scenario.targetNamespace, kubeInformers, nil, fakePodClient, fakeSecretClient, encryptiondeployer.StaticPodNodeProvider{OperatorClient: fakeOperatorClient}) + if err != nil { + t.Fatal(err) + } + + target := NewKeyController(scenario.targetNamespace, deployer, fakeOperatorClient, fakeApiServerClient, fakeApiServerInformer, kubeInformers, fakeSecretClient, scenario.encryptionSecretSelector, eventRecorder, scenario.targetGRs) + + // act + err = target.sync() + + // validate + if err == nil && scenario.expectedError != nil { + t.Fatal("expected to get an error from sync() method") + } + if err != nil && scenario.expectedError == nil { + t.Fatal(err) + } + if err != nil && scenario.expectedError != nil && err.Error() != scenario.expectedError.Error() { + t.Fatalf("unexpected error returned = %v, expected = %v", err, scenario.expectedError) + } + if err := encryptiontesting.ValidateActionsVerbs(fakeKubeClient.Actions(), scenario.expectedActions); err != nil { + t.Fatalf("incorrect action(s) detected: %v", err) + } + if scenario.validateFunc != nil { + scenario.validateFunc(t, fakeKubeClient.Actions(), scenario.targetNamespace, scenario.targetGRs) + } + if scenario.validateOperatorClientFunc != nil { + scenario.validateOperatorClientFunc(t, fakeOperatorClient) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migration_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migration_controller.go new file mode 100644 index 00000000000..002d52b8baf --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migration_controller.go @@ -0,0 +1,377 @@ +package controllers + +import ( + "encoding/json" + "fmt" + "reflect" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/retry" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 
"github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators" + "github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig" + "github.com/openshift/library-go/pkg/operator/encryption/secrets" + "github.com/openshift/library-go/pkg/operator/encryption/state" + "github.com/openshift/library-go/pkg/operator/encryption/statemachine" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + migrationWorkKey = "key" + + // how long to wait until we retry a migration when it failed with unknown errors. + migrationRetryDuration = time.Minute * 5 +) + +// The migrationController controller migrates resources to a new write key +// and annotated the write key secret afterwards with the migrated GRs. It +// +// * watches pods and secrets in +// * watches secrets in openshift-config-manager +// * computes a new, desired encryption config from encryption-config- +// and the existing keys in openshift-config-managed. +// * compares desired with current target config and stops when they differ +// * checks the write-key secret whether +// - encryption.apiserver.operator.openshift.io/migrated-timestamp annotation +// is missing or +// - a write-key for a resource does not show up in the +// encryption.apiserver.operator.openshift.io/migrated-resources And then +// starts a migration job (currently in-place synchronously, soon with the upstream migration tool) +// * updates the encryption.apiserver.operator.openshift.io/migrated-timestamp and +// encryption.apiserver.operator.openshift.io/migrated-resources annotations on the +// current write-key secrets. +type migrationController struct { + component string + + operatorClient operatorv1helpers.OperatorClient + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder + + preRunCachesSynced []cache.InformerSynced + + encryptedGRs []schema.GroupResource + + encryptionSecretSelector metav1.ListOptions + + secretClient corev1client.SecretsGetter + + deployer statemachine.Deployer + migrator migrators.Migrator +} + +func NewMigrationController( + component string, + deployer statemachine.Deployer, + migrator migrators.Migrator, + operatorClient operatorv1helpers.OperatorClient, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + secretClient corev1client.SecretsGetter, + encryptionSecretSelector metav1.ListOptions, + eventRecorder events.Recorder, + encryptedGRs []schema.GroupResource, +) *migrationController { + c := &migrationController{ + component: component, + operatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "EncryptionMigrationController"), + eventRecorder: eventRecorder.WithComponentSuffix("encryption-migration-controller"), + + encryptedGRs: encryptedGRs, + + encryptionSecretSelector: encryptionSecretSelector, + secretClient: secretClient, + deployer: deployer, + migrator: migrator, + } + + c.preRunCachesSynced = setUpInformers(deployer, operatorClient, kubeInformersForNamespaces, c.eventHandler()) + c.preRunCachesSynced = append(c.preRunCachesSynced, migrator.AddEventHandler(c.eventHandler())...) 
+ + return c +} + +func (c *migrationController) sync() error { + if ready, err := shouldRunEncryptionController(c.operatorClient); err != nil || !ready { + return err // we will get re-kicked when the operator status updates + } + + migratingResources, migrationError := c.migrateKeysIfNeededAndRevisionStable() + + // update failing condition + degraded := operatorv1.OperatorCondition{ + Type: "EncryptionMigrationControllerDegraded", + Status: operatorv1.ConditionFalse, + } + if migrationError != nil { + degraded.Status = operatorv1.ConditionTrue + degraded.Reason = "Error" + degraded.Message = migrationError.Error() + } + + // update progressing condition + progressing := operatorv1.OperatorCondition{ + Type: "EncryptionMigrationControllerProgressing", + Status: operatorv1.ConditionFalse, + } + if len(migratingResources) > 0 { + progressing.Status = operatorv1.ConditionTrue + progressing.Reason = "Migrating" + progressing.Message = fmt.Sprintf("migrating resources to a new write key: %v", grsToHumanReadable(migratingResources)) + } + + if _, _, updateError := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(degraded), operatorv1helpers.UpdateConditionFn(progressing)); updateError != nil { + return updateError + } + + return migrationError +} + +func (c *migrationController) setProgressing(migrating bool, reason, message string, args ...interface{}) error { + // update progressing condition + progressing := operatorv1.OperatorCondition{ + Type: "EncryptionMigrationControllerProgressing", + Status: operatorv1.ConditionTrue, + Reason: reason, + Message: fmt.Sprintf(message, args...), + } + if !migrating { + progressing.Status = operatorv1.ConditionFalse + } + + _, _, err := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(progressing)) + return err +} + +// TODO doc +func (c *migrationController) migrateKeysIfNeededAndRevisionStable() (migratingResources []schema.GroupResource, err error) { + // no storage migration during revision changes + currentEncryptionConfig, desiredEncryptionState, _, isTransitionalReason, err := statemachine.GetEncryptionConfigAndState(c.deployer, c.secretClient, c.encryptionSecretSelector, c.encryptedGRs) + if err != nil { + return nil, err + } + if currentEncryptionConfig == nil || len(isTransitionalReason) > 0 { + c.queue.AddAfter(migrationWorkKey, 2*time.Minute) + return nil, nil + } + + encryptionSecrets, err := secrets.ListKeySecrets(c.secretClient, c.encryptionSecretSelector) + if err != nil { + return nil, err + } + currentState, _ := encryptionconfig.ToEncryptionState(currentEncryptionConfig, encryptionSecrets) + desiredEncryptedConfig := encryptionconfig.FromEncryptionState(desiredEncryptionState) + + // no storage migration until config is stable + if !reflect.DeepEqual(currentEncryptionConfig.Resources, desiredEncryptedConfig.Resources) { + // stop all running migrations + for gr := range currentState { + if err := c.migrator.PruneMigration(gr); err != nil { + klog.Warningf("failed to interrupt migration for resource %s", gr) + // ignore error + } + } + + c.queue.AddAfter(migrationWorkKey, 2*time.Minute) + return nil, nil // retry in a little while but do not go degraded + } + + // sort by gr to get deterministic condition strings + grs := []schema.GroupResource{} + for gr := range currentState { + grs = append(grs, gr) + } + sort.Slice(grs, func(i, j int) bool { + return grs[i].String() < grs[j].String() + }) + + // all API servers have converged onto a single revision that matches 
our desired overall encryption state
+	// now we know that it is safe to attempt key migrations
+	// we never want to migrate during an intermediate state because that could lead to one API server
+	// using a write key that another API server has not observed
+	// this could lead to etcd storing data that not all API servers can decrypt
+	var errs []error
+	for _, gr := range grs {
+		grActualKeys := currentState[gr]
+		if !grActualKeys.HasWriteKey() {
+			continue // no write key to migrate to
+		}
+
+		if alreadyMigrated, _, _ := state.MigratedFor([]schema.GroupResource{gr}, grActualKeys.WriteKey); alreadyMigrated {
+			continue
+		}
+
+		// idempotent migration start
+		finished, result, when, err := c.migrator.EnsureMigration(gr, grActualKeys.WriteKey.Key.Name)
+		if err == nil && finished && result != nil && time.Since(when) > migrationRetryDuration {
+			// last migration error is far enough ago. Prune and retry.
+			if err := c.migrator.PruneMigration(gr); err != nil {
+				errs = append(errs, err)
+				continue
+			}
+			finished, result, when, err = c.migrator.EnsureMigration(gr, grActualKeys.WriteKey.Key.Name)
+		}
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		if finished && result != nil {
+			errs = append(errs, result)
+			continue
+		}
+
+		if !finished {
+			migratingResources = append(migratingResources, gr)
+			continue
+		}
+
+		// update secret annotations
+		oldWriteKey, err := secrets.FromKeyState(c.component, grActualKeys.WriteKey)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+			s, err := c.secretClient.Secrets(oldWriteKey.Namespace).Get(oldWriteKey.Name, metav1.GetOptions{})
+			if err != nil {
+				return fmt.Errorf("failed to get key secret %s/%s: %v", oldWriteKey.Namespace, oldWriteKey.Name, err)
+			}
+
+			changed, err := setResourceMigrated(gr, s)
+			if err != nil {
+				return err
+			}
+			if !changed {
+				return nil
+			}
+
+			_, _, updateErr := resourceapply.ApplySecret(c.secretClient, c.eventRecorder, s)
+			return updateErr
+		}); err != nil {
+			errs = append(errs, err)
+			continue
+		}
+	}
+
+	return migratingResources, errors.NewAggregate(errs)
+}
+
+func setResourceMigrated(gr schema.GroupResource, s *corev1.Secret) (bool, error) {
+	migratedGRs := secrets.MigratedGroupResources{}
+	if existing, found := s.Annotations[secrets.EncryptionSecretMigratedResources]; found {
+		if err := json.Unmarshal([]byte(existing), &migratedGRs); err != nil {
+			// ignore error and just start fresh, causing some more migration at worst
+			migratedGRs = secrets.MigratedGroupResources{}
+		}
+	}
+
+	alreadyMigrated := false
+	for _, existingGR := range migratedGRs.Resources {
+		if existingGR == gr {
+			alreadyMigrated = true
+			break
+		}
+	}
+
+	// update timestamp, if missing or first migration of gr
+	if _, found := s.Annotations[secrets.EncryptionSecretMigratedTimestamp]; found && alreadyMigrated {
+		return false, nil
+	}
+	if s.Annotations == nil {
+		s.Annotations = map[string]string{}
+	}
+	s.Annotations[secrets.EncryptionSecretMigratedTimestamp] = time.Now().Format(time.RFC3339)
+
+	// update resource list
+	if !alreadyMigrated {
+		migratedGRs.Resources = append(migratedGRs.Resources, gr)
+		bs, err := json.Marshal(migratedGRs)
+		if err != nil {
+			return false, fmt.Errorf("failed to marshal %s annotation value %#v for key secret %s/%s", secrets.EncryptionSecretMigratedResources, migratedGRs, s.Namespace, s.Name)
+		}
+		s.Annotations[secrets.EncryptionSecretMigratedResources] = string(bs)
+	}
+
+	return true, nil
+}
+
+func (c *migrationController) Run(stopCh <-chan
+
+func (c *migrationController) Run(stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting EncryptionMigrationController")
+	defer klog.Infof("Shutting down EncryptionMigrationController")
+	if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...) {
+		utilruntime.HandleError(fmt.Errorf("caches did not sync"))
+		return
+	}
+
+	// only start one worker
+	go wait.Until(c.runWorker, time.Second, stopCh)
+
+	<-stopCh
+}
+
+func (c *migrationController) runWorker() {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *migrationController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+func (c *migrationController) eventHandler() cache.ResourceEventHandler {
+	return cache.ResourceEventHandlerFuncs{
+		AddFunc:    func(obj interface{}) { c.queue.Add(migrationWorkKey) },
+		UpdateFunc: func(old, new interface{}) { c.queue.Add(migrationWorkKey) },
+		DeleteFunc: func(obj interface{}) { c.queue.Add(migrationWorkKey) },
+	}
+}
+
+// groupToHumanReadable extracts a group from gr and makes it more readable, for example it converts an empty group to "core"
+// Note: use it only when printing to a log file; do not use the result to request resources from the server
+func groupToHumanReadable(gr schema.GroupResource) string {
+	group := gr.Group
+	if len(group) == 0 {
+		group = "core"
+	}
+	return group
+}
+
+func grsToHumanReadable(grs []schema.GroupResource) []string {
+	ret := make([]string, 0, len(grs))
+	for _, gr := range grs {
+		ret = append(ret, fmt.Sprintf("%s/%s", groupToHumanReadable(gr), gr.Resource))
+	}
+	return ret
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migration_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migration_controller_test.go
new file mode 100644
index 00000000000..e025493d05b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migration_controller_test.go
@@ -0,0 +1,761 @@
+package controllers
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"testing"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
+	"k8s.io/client-go/kubernetes/fake"
+	clientgotesting "k8s.io/client-go/testing"
+	"k8s.io/client-go/tools/cache"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	encryptiondeployer "github.com/openshift/library-go/pkg/operator/encryption/deployer"
+	"github.com/openshift/library-go/pkg/operator/encryption/secrets"
+	encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+func TestMigrationController(t *testing.T) {
+	scenarios := []struct {
+		name                     string
+		initialResources         []runtime.Object
+		initialSecrets           []*corev1.Secret
+		encryptionSecretSelector metav1.ListOptions
+		targetNamespace          string
+		targetGRs                []schema.GroupResource
+		targetAPIResources       []metav1.APIResource
+		// expectedActions holds actions to be verified in the form of "verb:resource:namespace"
+		expectedActions 
[]string + + expectedMigratorCalls []string + migratorEnsureReplies map[schema.GroupResource]map[string]finishedResultErr + migratorPruneReplies map[schema.GroupResource]error + + validateFunc func(ts *testing.T, actionsKube []clientgotesting.Action, initialSecrets []*corev1.Secret, targetGRs []schema.GroupResource, unstructuredObjs []runtime.Object) + validateOperatorClientFunc func(ts *testing.T, operatorClient v1helpers.OperatorClient) + expectedError error + }{ + { + name: "no config => nothing happens", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + {Group: "", Resource: "configmaps"}, + }, + targetAPIResources: []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + initialSecrets: nil, + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + }, + }, + + { + name: "migrations are unfinished", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + {Group: "", Resource: "configmaps"}, + }, + targetAPIResources: []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + initialSecrets: []*corev1.Secret{ + func() *corev1.Secret { + s := encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")) + s.Kind = "Secret" + s.APIVersion = corev1.SchemeGroupVersion.String() + return s + }(), + func() *corev1.Secret { + keysResForSecrets := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + keysResForConfigMaps := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysResForConfigMaps, keysResForSecrets}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + ecs.APIVersion = corev1.SchemeGroupVersion.String() + + return ecs + }(), + }, + migratorEnsureReplies: map[schema.GroupResource]map[string]finishedResultErr{ + {Group: "", Resource: "secrets"}: {"1": {finished: false}}, + {Group: "", Resource: "configmaps"}: {"1": {finished: false}}, + }, + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + }, + expectedMigratorCalls: []string{ + "ensure:configmaps:1", + "ensure:secrets:1", + }, + validateFunc: func(ts *testing.T, actionsKube []clientgotesting.Action, initialSecrets []*corev1.Secret, targetGRs []schema.GroupResource, unstructuredObjs []runtime.Object) { + validateSecretsWereAnnotated(ts, []schema.GroupResource{}, actionsKube, nil, []*corev1.Secret{initialSecrets[0]}) + }, + validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) { + expectedConditions := 
[]operatorv1.OperatorCondition{ + { + Type: "EncryptionMigrationControllerDegraded", + Status: "False", + }, + { + Type: "EncryptionMigrationControllerProgressing", + Reason: "Migrating", + Message: "migrating resources to a new write key: [core/configmaps core/secrets]", + Status: "True", + }, + } + // TODO: test sequence of condition changes, not only the end result + encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, expectedConditions) + }, + }, + + { + name: "configmaps are migrated, secrets are not finished", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + {Group: "", Resource: "configmaps"}, + }, + targetAPIResources: []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + initialSecrets: []*corev1.Secret{ + func() *corev1.Secret { + s := encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")) + s.Kind = "Secret" + s.APIVersion = corev1.SchemeGroupVersion.String() + return s + }(), + func() *corev1.Secret { + keysResForSecrets := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + keysResForConfigMaps := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysResForConfigMaps, keysResForSecrets}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + ecs.APIVersion = corev1.SchemeGroupVersion.String() + + return ecs + }(), + }, + migratorEnsureReplies: map[schema.GroupResource]map[string]finishedResultErr{ + {Group: "", Resource: "secrets"}: {"1": {finished: false}}, + {Group: "", Resource: "configmaps"}: {"1": {finished: true}}, + }, + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "create:events:operator", + }, + expectedMigratorCalls: []string{ + "ensure:configmaps:1", + "ensure:secrets:1", + }, + validateFunc: func(ts *testing.T, actionsKube []clientgotesting.Action, initialSecrets []*corev1.Secret, targetGRs []schema.GroupResource, unstructuredObjs []runtime.Object) { + validateSecretsWereAnnotated(ts, []schema.GroupResource{{Group: "", Resource: "configmaps"}}, actionsKube, []*corev1.Secret{initialSecrets[0]}, nil) + }, + validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) { + expectedConditions := []operatorv1.OperatorCondition{ + { + Type: "EncryptionMigrationControllerDegraded", + Status: "False", + }, + { + Type: "EncryptionMigrationControllerProgressing", + Reason: "Migrating", + Message: "migrating resources to a new write key: [core/secrets]", + Status: "True", + }, + } + // TODO: test sequence of condition changes, not only the end result + encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, expectedConditions) + }, + 
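+			// Note how the Progressing message narrows from
+			// [core/configmaps core/secrets] in the previous scenario to
+			// [core/secrets] here: finished group resources drop out of the
+			// migratingResources slice that sync() reports.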
}, + + { + name: "all migrations are finished", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + {Group: "", Resource: "configmaps"}, + }, + targetAPIResources: []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + initialSecrets: []*corev1.Secret{ + func() *corev1.Secret { + s := encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")) + s.Kind = "Secret" + s.APIVersion = corev1.SchemeGroupVersion.String() + return s + }(), + func() *corev1.Secret { + keysResForSecrets := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + keysResForConfigMaps := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysResForConfigMaps, keysResForSecrets}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + ecs.APIVersion = corev1.SchemeGroupVersion.String() + + return ecs + }(), + }, + migratorEnsureReplies: map[schema.GroupResource]map[string]finishedResultErr{ + {Group: "", Resource: "secrets"}: {"1": {finished: true}}, + {Group: "", Resource: "configmaps"}: {"1": {finished: true}}, + }, + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "create:events:operator", + "get:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "create:events:operator", + }, + expectedMigratorCalls: []string{ + "ensure:configmaps:1", + "ensure:secrets:1", + }, + validateFunc: func(ts *testing.T, actionsKube []clientgotesting.Action, initialSecrets []*corev1.Secret, targetGRs []schema.GroupResource, unstructuredObjs []runtime.Object) { + validateSecretsWereAnnotated(ts, []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, actionsKube, []*corev1.Secret{initialSecrets[0]}, nil) + }, + validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) { + expectedConditions := []operatorv1.OperatorCondition{ + { + Type: "EncryptionMigrationControllerDegraded", + Status: "False", + }, + { + Type: "EncryptionMigrationControllerProgressing", + Status: "False", + }, + } + // TODO: test sequence of condition changes, not only the end result + encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, expectedConditions) + }, + }, + + { + name: "configmap migration failed", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + {Group: "", Resource: "configmaps"}, + }, + targetAPIResources: []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, 
+ }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + initialSecrets: []*corev1.Secret{ + func() *corev1.Secret { + s := encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")) + s.Kind = "Secret" + s.APIVersion = corev1.SchemeGroupVersion.String() + return s + }(), + func() *corev1.Secret { + keysResForSecrets := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + keysResForConfigMaps := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysResForConfigMaps, keysResForSecrets}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + ecs.APIVersion = corev1.SchemeGroupVersion.String() + + return ecs + }(), + }, + migratorEnsureReplies: map[schema.GroupResource]map[string]finishedResultErr{ + {Group: "", Resource: "secrets"}: {"1": {finished: false}}, + {Group: "", Resource: "configmaps"}: {"1": {finished: true, result: errors.New("configmap migration failed")}}, + }, + expectedError: errors.New("configmap migration failed"), + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + }, + expectedMigratorCalls: []string{ + "ensure:configmaps:1", + "ensure:secrets:1", + }, + validateFunc: func(ts *testing.T, actionsKube []clientgotesting.Action, initialSecrets []*corev1.Secret, targetGRs []schema.GroupResource, unstructuredObjs []runtime.Object) { + validateSecretsWereAnnotated(ts, []schema.GroupResource{}, actionsKube, nil, []*corev1.Secret{initialSecrets[0]}) + }, + validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) { + expectedConditions := []operatorv1.OperatorCondition{ + { + Type: "EncryptionMigrationControllerDegraded", + Reason: "Error", + Message: "configmap migration failed", + Status: "True", + }, + { + Type: "EncryptionMigrationControllerProgressing", + Reason: "Migrating", + Message: "migrating resources to a new write key: [core/secrets]", + Status: "True", + }, + } + // TODO: test sequence of condition changes, not only the end result + encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, expectedConditions) + }, + }, + + { + name: "configmap migration creation failed", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + {Group: "", Resource: "configmaps"}, + }, + targetAPIResources: []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + initialSecrets: []*corev1.Secret{ + func() *corev1.Secret { + s := encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")) + s.Kind = "Secret" + s.APIVersion = corev1.SchemeGroupVersion.String() + return s + }(), + func() *corev1.Secret { + keysResForSecrets := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: 
"secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + keysResForConfigMaps := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "1", + Secret: "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", + }, + }, + } + + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysResForConfigMaps, keysResForSecrets}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + ecs.APIVersion = corev1.SchemeGroupVersion.String() + + return ecs + }(), + }, + migratorEnsureReplies: map[schema.GroupResource]map[string]finishedResultErr{ + {Group: "", Resource: "secrets"}: {"1": {finished: false}}, + {Group: "", Resource: "configmaps"}: {"1": {finished: false, err: errors.New("failed to start configmap migration")}}, + }, + expectedError: errors.New("failed to start configmap migration"), + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + }, + expectedMigratorCalls: []string{ + "ensure:configmaps:1", + "ensure:secrets:1", + }, + validateFunc: func(ts *testing.T, actionsKube []clientgotesting.Action, initialSecrets []*corev1.Secret, targetGRs []schema.GroupResource, unstructuredObjs []runtime.Object) { + validateSecretsWereAnnotated(ts, []schema.GroupResource{}, actionsKube, nil, []*corev1.Secret{initialSecrets[0]}) + }, + validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) { + expectedConditions := []operatorv1.OperatorCondition{ + { + Type: "EncryptionMigrationControllerDegraded", + Reason: "Error", + Message: "failed to start configmap migration", + Status: "True", + }, + { + Type: "EncryptionMigrationControllerProgressing", + Reason: "Migrating", + Message: "migrating resources to a new write key: [core/secrets]", + Status: "True", + }, + } + // TODO: test sequence of condition changes, not only the end result + encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, expectedConditions) + }, + }, + + // TODO: add more tests for not so happy paths + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // setup + fakeOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + OperatorStatus: operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "EncryptionMigrationControllerDegraded", + Status: "False", + }, + { + Type: "EncryptionMigrationControllerProgressing", + Status: operatorv1.ConditionFalse, + }, + }, + }, + NodeStatuses: []operatorv1.NodeStatus{ + {NodeName: "node-1"}, + }, + }, + nil, + nil, + ) + + allResources := []runtime.Object{} + allResources = append(allResources, scenario.initialResources...) + for _, initialSecret := range scenario.initialSecrets { + allResources = append(allResources, initialSecret) + } + fakeKubeClient := fake.NewSimpleClientset(allResources...) + eventRecorder := events.NewRecorder(fakeKubeClient.CoreV1().Events("operator"), "test-encryptionKeyController", &corev1.ObjectReference{}) + // we pass "openshift-config-managed" and $targetNamespace ns because the controller creates an informer for secrets in that namespace. 
+			// note that the informer factory is not used in the test - it's only needed to create the controller
+			kubeInformers := v1helpers.NewKubeInformersForNamespaces(fakeKubeClient, "openshift-config-managed", scenario.targetNamespace)
+			fakeSecretClient := fakeKubeClient.CoreV1()
+
+			// let dynamic client know about the resources we want to encrypt
+			resourceRequiresEncryptionFunc := func(kind string) bool {
+				if len(kind) == 0 {
+					return false
+				}
+				for _, gr := range scenario.targetGRs {
+					if strings.HasPrefix(gr.Resource, strings.ToLower(kind)) {
+						return true
+					}
+				}
+				return false
+			}
+			unstructuredObjs := []runtime.Object{}
+			for _, rawObject := range allResources {
+				rawUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(rawObject.DeepCopyObject())
+				if err != nil {
+					t.Fatal(err)
+				}
+				unstructuredObj := &unstructured.Unstructured{Object: rawUnstructured}
+				if resourceRequiresEncryptionFunc(unstructuredObj.GetKind()) {
+					unstructuredObjs = append(unstructuredObjs, unstructuredObj)
+				}
+			}
+
+			deployer, err := encryptiondeployer.NewRevisionLabelPodDeployer("revision", scenario.targetNamespace, kubeInformers, nil, fakeKubeClient.CoreV1(), fakeSecretClient, encryptiondeployer.StaticPodNodeProvider{OperatorClient: fakeOperatorClient})
+			if err != nil {
+				t.Fatal(err)
+			}
+			migrator := &fakeMigrator{
+				ensureReplies: scenario.migratorEnsureReplies,
+				pruneReplies:  scenario.migratorPruneReplies,
+			}
+
+			// act
+			target := NewMigrationController(
+				"kms",
+				deployer,
+				migrator,
+				fakeOperatorClient,
+				kubeInformers,
+				fakeSecretClient,
+				scenario.encryptionSecretSelector,
+				eventRecorder,
+				scenario.targetGRs,
+			)
+			err = target.sync()
+
+			// validate
+			if err == nil && scenario.expectedError != nil {
+				t.Fatal("expected to get an error from sync() method")
+			}
+			if err != nil && scenario.expectedError == nil {
+				t.Fatal(err)
+			}
+			if err != nil && scenario.expectedError != nil && err.Error() != scenario.expectedError.Error() {
+				t.Fatalf("unexpected error returned = %v, expected = %v", err, scenario.expectedError)
+			}
+			if err := encryptiontesting.ValidateActionsVerbs(fakeKubeClient.Actions(), scenario.expectedActions); err != nil {
+				t.Fatalf("incorrect action(s) detected: %v", err)
+			}
+			if !reflect.DeepEqual(scenario.expectedMigratorCalls, migrator.calls) {
+				t.Fatalf("incorrect migrator calls:\n expected: %v\n got: %v", scenario.expectedMigratorCalls, migrator.calls)
+			}
+			if scenario.validateFunc != nil {
+				scenario.validateFunc(t, fakeKubeClient.Actions(), scenario.initialSecrets, scenario.targetGRs, unstructuredObjs)
+			}
+			if scenario.validateOperatorClientFunc != nil {
+				scenario.validateOperatorClientFunc(t, fakeOperatorClient)
+			}
+		})
+	}
+}
+
+func validateSecretsWereAnnotated(ts *testing.T, grs []schema.GroupResource, actions []clientgotesting.Action, expectedSecrets []*corev1.Secret, notExpectedSecrets []*corev1.Secret) {
+	ts.Helper()
+
+	lastSeen := map[string]*corev1.Secret{}
+	for _, action := range actions {
+		if !action.Matches("update", "secrets") {
+			continue
+		}
+		updateAction := action.(clientgotesting.UpdateAction)
+		actualSecret := updateAction.GetObject().(*corev1.Secret)
+		lastSeen[fmt.Sprintf("%s/%s", actualSecret.Namespace, actualSecret.Name)] = actualSecret
+	}
+
+	for _, expected := range expectedSecrets {
+		s, found := lastSeen[fmt.Sprintf("%s/%s",
expected.Namespace, expected.Name)] + if !found { + ts.Errorf("missing update on %s/%s", expected.Namespace, expected.Name) + continue + } + if _, ok := s.Annotations[secrets.EncryptionSecretMigratedTimestamp]; !ok { + ts.Errorf("missing %s annotation on %s/%s", secrets.EncryptionSecretMigratedTimestamp, s.Namespace, s.Name) + } + if v, ok := s.Annotations[secrets.EncryptionSecretMigratedResources]; !ok { + ts.Errorf("missing %s annotation on %s/%s", secrets.EncryptionSecretMigratedResources, s.Namespace, s.Name) + } else { + migratedGRs := secrets.MigratedGroupResources{} + if err := json.Unmarshal([]byte(v), &migratedGRs); err != nil { + ts.Errorf("failed to unmarshal %s annotation %q of secret %s/%s: %v", secrets.EncryptionSecretMigratedResources, v, s.Namespace, s.Name, err) + continue + } + migratedGRsSet := map[string]bool{} + for _, gr := range migratedGRs.Resources { + migratedGRsSet[gr.String()] = true + } + for _, gr := range grs { + if _, found := migratedGRsSet[gr.String()]; !found { + ts.Errorf("missing resource %s in %s annotation on %s/%s", gr.String(), secrets.EncryptionSecretMigratedResources, s.Namespace, s.Name) + } + } + } + } + + for _, unexpected := range notExpectedSecrets { + _, found := lastSeen[fmt.Sprintf("%s/%s", unexpected.Namespace, unexpected.Name)] + if found { + ts.Errorf("unexpected update on %s/%s", unexpected.Namespace, unexpected.Name) + continue + } + } +} + +type finishedResultErr struct { + finished bool + result, err error +} + +type fakeMigrator struct { + calls []string + ensureReplies map[schema.GroupResource]map[string]finishedResultErr + pruneReplies map[schema.GroupResource]error +} + +func (m *fakeMigrator) EnsureMigration(gr schema.GroupResource, writeKey string) (finished bool, result error, ts time.Time, err error) { + m.calls = append(m.calls, fmt.Sprintf("ensure:%s:%s", gr, writeKey)) + r := m.ensureReplies[gr][writeKey] + return r.finished, r.result, time.Now(), r.err +} + +func (m *fakeMigrator) PruneMigration(gr schema.GroupResource) error { + m.calls = append(m.calls, fmt.Sprintf("prune:%s", gr)) + return m.pruneReplies[gr] +} + +func (m *fakeMigrator) AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced { + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/errors.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/errors.go new file mode 100644 index 00000000000..2b94e0fd95e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/errors.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package migrators
+
+import (
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/utils/pointer"
+)
+
+// isConnectionRefusedError checks whether the error string includes "connection refused"
+// TODO: find a "go-way" to detect this error, probably using *os.SyscallError
+func isConnectionRefusedError(err error) bool {
+	return strings.Contains(err.Error(), "connection refused")
+}
+
+// canRetry returns false if the provided error indicates a retry is
+// impossible. It returns true if the error is possibly temporary. It returns
+// nil for all other errors, where it is unclear.
+func canRetry(err error) *bool {
+	switch {
+	case err == nil:
+		return nil
+	case errors.IsNotFound(err), errors.IsMethodNotSupported(err):
+		return pointer.BoolPtr(false)
+	case errors.IsConflict(err), errors.IsServerTimeout(err), errors.IsTooManyRequests(err), net.IsProbableEOF(err), net.IsConnectionReset(err), net.IsNoRoutesError(err), isConnectionRefusedError(err):
+		return pointer.BoolPtr(true)
+	default:
+		return nil
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess.go
new file mode 100644
index 00000000000..7196df6d8b8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess.go
@@ -0,0 +1,192 @@
+package migrators
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog"
+)
+
+func NewInProcessMigrator(dynamicClient dynamic.Interface, discoveryClient discovery.ServerResourcesInterface) *InProcessMigrator {
+	return &InProcessMigrator{
+		dynamicClient:   dynamicClient,
+		discoveryClient: discoveryClient,
+		running:         map[schema.GroupResource]*inProcessMigration{},
+	}
+}
+
+// InProcessMigrator runs migration in-process using paging.
+type InProcessMigrator struct {
+	dynamicClient   dynamic.Interface
+	discoveryClient discovery.ServerResourcesInterface
+
+	lock    sync.Mutex
+	running map[schema.GroupResource]*inProcessMigration
+
+	handler cache.ResourceEventHandler
+}
+
+type inProcessMigration struct {
+	stopCh   chan<- struct{}
+	doneCh   <-chan struct{}
+	writeKey string
+
+	// non-nil when finished. *result==nil means "no error"
+	result *error
+	// when did it finish
+	timestamp time.Time
+}
+
+func (m *InProcessMigrator) EnsureMigration(gr schema.GroupResource, writeKey string) (finished bool, result error, ts time.Time, err error) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	// finished?
+	migration := m.running[gr]
+	if migration != nil && migration.writeKey == writeKey {
+		if migration.result == nil {
+			return false, nil, time.Time{}, nil
+		}
+		return true, *migration.result, migration.timestamp, nil
+	}
+
+	// different key?
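+	// A new write key invalidates any still-running migration for this GR.
+	// The interruption below relies on runMigration recording its result under
+	// the lock and then closing doneCh; the handshake is (sketch):
+	//
+	//	close(stopCh)   // ask the worker goroutine to stop
+	//	m.lock.Unlock() // let it take the lock and record its result
+	//	<-doneCh        // wait until it has finished
+	//	m.lock.Lock()   // re-acquire before mutating m.running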
+ if migration != nil && migration.result == nil { + klog.V(2).Infof("Interrupting running migration for resource %v and write key %q", gr, migration.writeKey) + close(migration.stopCh) + + // give go routine time to update the result + m.lock.Unlock() + <-migration.doneCh + m.lock.Lock() + } + + v, err := preferredResourceVersion(m.discoveryClient, gr) + if err != nil { + return false, nil, time.Time{}, err + } + + stopCh := make(chan struct{}) + doneCh := make(chan struct{}) + m.running[gr] = &inProcessMigration{ + stopCh: stopCh, + doneCh: doneCh, + writeKey: writeKey, + } + + go m.runMigration(gr.WithVersion(v), writeKey, stopCh, doneCh) + + return false, nil, time.Time{}, nil +} + +func (m *InProcessMigrator) runMigration(gvr schema.GroupVersionResource, writeKey string, stopCh <-chan struct{}, doneCh chan<- struct{}) { + var result error + + defer close(doneCh) + defer func() { + if r := recover(); r != nil { + if err, ok := r.(error); ok { + result = err + } else { + result = fmt.Errorf("panic: %v", r) + } + } + + m.lock.Lock() + defer m.lock.Unlock() + migration := m.running[gvr.GroupResource()] + if migration == nil || migration.writeKey != writeKey { + // ok, this is not us. Should never happen. + return + } + + migration.result = &result + migration.timestamp = time.Now() + + m.handler.OnAdd(&corev1.Secret{}) // fake secret to trigger event loop of controller + }() + + ctx, cancelFn := context.WithCancel(context.Background()) + defer cancelFn() + go func() { + <-stopCh + cancelFn() + }() + + d := m.dynamicClient.Resource(gvr) + + listProcessor := newListProcessor(ctx, m.dynamicClient, func(obj *unstructured.Unstructured) error { + for { + _, updateErr := d.Namespace(obj.GetNamespace()).Update(obj, metav1.UpdateOptions{}) + if updateErr == nil || errors.IsNotFound(updateErr) || errors.IsConflict(updateErr) { + return nil + } + if retryable := canRetry(updateErr); retryable == nil || *retryable == false { + klog.Warningf("Update of %s/%s failed: %v", obj.GetNamespace(), obj.GetName(), updateErr) + return updateErr // not retryable or we don't know. Return error and controller will restart migration. + } + if seconds, delay := errors.SuggestsClientDelay(updateErr); delay && seconds > 0 { + klog.V(2).Infof("Sleeping %ds while updating %s/%s of type %v after retryable error: %v", seconds, obj.GetNamespace(), obj.GetName(), gvr, updateErr) + time.Sleep(time.Duration(seconds) * time.Second) + } + } + }) + result = listProcessor.run(gvr) +} + +func (m *InProcessMigrator) PruneMigration(gr schema.GroupResource) error { + m.lock.Lock() + defer m.lock.Unlock() + + migration := m.running[gr] + delete(m.running, gr) + + // finished? 
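+	// Prune reuses the stop/unlock/wait handshake from EnsureMigration above so
+	// that a still-running goroutine (result == nil) cannot outlive the
+	// bookkeeping entry that was just removed from m.running.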
+ if migration != nil && migration.result == nil { + close(migration.stopCh) + + // give go routine time to update the result + m.lock.Unlock() + <-migration.doneCh + m.lock.Lock() + } + + return nil +} + +func (m *InProcessMigrator) AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced { + m.handler = handler + return nil +} + +func preferredResourceVersion(c discovery.ServerResourcesInterface, gr schema.GroupResource) (string, error) { + resourceLists, discoveryErr := c.ServerPreferredResources() // safe to ignore error + for _, resourceList := range resourceLists { + groupVersion, err := schema.ParseGroupVersion(resourceList.GroupVersion) + if err != nil { + return "", err + } + if groupVersion.Group != gr.Group { + continue + } + for _, resource := range resourceList.APIResources { + if (len(resource.Group) == 0 || resource.Group == gr.Group) && resource.Name == gr.Resource { + if len(resource.Version) > 0 { + return resource.Version, nil + } + return groupVersion.Version, nil + } + } + } + return "", fmt.Errorf("failed to find version for %s, discoveryErr=%v", gr, discoveryErr) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_processor.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_processor.go new file mode 100644 index 00000000000..e3154f8f50f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_processor.go @@ -0,0 +1,185 @@ +package migrators + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/tools/pager" + "k8s.io/klog" +) + +const ( + defaultConcurrency = 10 +) + +// workerFunc function that is executed by workers to process a single item +type workerFunc func(*unstructured.Unstructured) error + +// listProcessor represents a type that processes resources in parallel. +// It retrieves resources from the server in batches and distributes among set of workers. +type listProcessor struct { + concurrency int + workerFn workerFunc + dynamicClient dynamic.Interface + ctx context.Context +} + +// newListProcessor creates a new instance of listProcessor +func newListProcessor(ctx context.Context, dynamicClient dynamic.Interface, workerFn workerFunc) *listProcessor { + return &listProcessor{ + concurrency: defaultConcurrency, + workerFn: workerFn, + dynamicClient: dynamicClient, + ctx: ctx, + } +} + +// run starts processing all the instance of the given GVR in batches. 
+// Note that this operation blocks until all resources have been processed, the next page cannot be retrieved, or the context has been cancelled
+func (p *listProcessor) run(gvr schema.GroupVersionResource) error {
+	listPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+		for {
+			allResource, err := p.dynamicClient.Resource(gvr).List(opts)
+			if err != nil {
+				klog.Warningf("List of %v failed: %v", gvr, err)
+				if errors.IsResourceExpired(err) {
+					token, err := inconsistentContinueToken(err)
+					if err != nil {
+						return nil, err
+					}
+					opts.Continue = token
+					klog.V(2).Infof("Relisting %v after handling expired token", gvr)
+					continue
+				} else if retryable := canRetry(err); retryable == nil || *retryable == false {
+					return nil, err // not retryable or we don't know. Return error and controller will restart migration.
+				} else {
+					if seconds, delay := errors.SuggestsClientDelay(err); delay {
+						time.Sleep(time.Duration(seconds) * time.Second)
+					}
+					klog.V(2).Infof("Relisting %v after retryable error: %v", gvr, err)
+					continue
+				}
+			}
+
+			migrationStarted := time.Now()
+			klog.V(2).Infof("Migrating %d objects of %v", len(allResource.Items), gvr)
+			if err = p.processList(allResource, gvr); err != nil {
+				klog.Warningf("Migration of %v failed after %v: %v", gvr, time.Since(migrationStarted), err)
+				return nil, err
+			}
+			klog.V(2).Infof("Migration of %d objects of %v finished in %v", len(allResource.Items), gvr, time.Since(migrationStarted))
+
+			allResource.Items = nil // do not accumulate items, this fakes the visitor pattern
+			return allResource, nil // leave the rest of the list intact to preserve continue token
+		}
+	}))
+	listPager.FullListIfExpired = false // prevent memory explosion from full list
+
+	migrationStarted := time.Now()
+	if _, err := listPager.List(p.ctx, metav1.ListOptions{}); err != nil {
+		metrics.ObserveFailedMigration(gvr.String())
+		return err
+	}
+	migrationDuration := time.Since(migrationStarted)
+	klog.V(2).Infof("Migration for %v finished in %v", gvr, migrationDuration)
+	metrics.ObserveSucceededMigration(gvr.String())
+	metrics.ObserveSucceededMigrationDuration(migrationDuration.Seconds(), gvr.String())
+	return nil
+}
+
+func (p *listProcessor) processList(l *unstructured.UnstructuredList, gvr schema.GroupVersionResource) error {
+	workCh := make(chan *unstructured.Unstructured, p.concurrency)
+	ctx, cancel := context.WithCancel(p.ctx)
+	defer cancel()
+
+	processed := 0
+	go func() {
+		defer utilruntime.HandleCrash()
+		defer close(workCh)
+		for i := range l.Items {
+			select {
+			case workCh <- &l.Items[i]:
+				processed++
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, p.concurrency)
+	for i := 0; i < p.concurrency; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := p.worker(workCh, gvr); err != nil {
+				errCh <- err
+				cancel() // stop everything when the first worker errors
+			}
+		}()
+	}
+	wg.Wait()
+	close(errCh)
+
+	var errs []error
+	for err := range errCh {
+		errs = append(errs, err)
+	}
+	if len(errs) > 0 {
+		return utilerrors.NewAggregate(errs)
+	}
+	if processed < len(l.Items) {
+		return fmt.Errorf("context cancelled")
+	}
+	return nil
+}
+
+func (p *listProcessor) worker(workCh <-chan *unstructured.Unstructured, gvr schema.GroupVersionResource) (result error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if err, ok := r.(error); ok {
+				result = err
+			} else {
+				result = fmt.Errorf("panic: %v", r)
+			}
+		}
+	}()
+
+	for item := range workCh {
+		err :=
p.workerFn(item) + metrics.ObserveObjectsMigrated(1, gvr.String()) + if err != nil { + return err + } + } + + return nil +} + +// inconsistentContinueToken extracts the continue token from the response which might be used to retrieve the remainder of the results +// +// Note: +// continuing with the provided token might result in an inconsistent list. Objects that were created, +// modified, or deleted between the time the first chunk was returned and now may show up in the list. +func inconsistentContinueToken(err error) (string, error) { + status, ok := err.(errors.APIStatus) + if !ok { + return "", fmt.Errorf("expected error to implement the APIStatus interface, got %v", reflect.TypeOf(err)) + } + token := status.Status().ListMeta.Continue + if len(token) == 0 { + return "", fmt.Errorf("expected non empty continue token") + } + return token, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_processor_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_processor_test.go new file mode 100644 index 00000000000..dd12aaf3525 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_processor_test.go @@ -0,0 +1,301 @@ +package migrators + +import ( + "context" + "fmt" + "reflect" + "sync" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynamicfake "k8s.io/client-go/dynamic/fake" + clientgotesting "k8s.io/client-go/testing" +) + +func TestInprocessProcessor(t *testing.T) { + scenarios := []struct { + name string + workerFunc func(*unstructured.Unstructured) error + validateFunc func(ts *testing.T, actions []clientgotesting.Action, count int, err error) + resources []runtime.Object + gvr schema.GroupVersionResource + }{ + // scenario 1: + { + name: "worker function is executed", + workerFunc: func(obj *unstructured.Unstructured) error { + if obj.GetKind() != "Secret" { + return fmt.Errorf("incorrect kind %v", obj.GetKind()) + } + return nil + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, count int, err error) { + if err != nil { + t.Error(err) + } + if err := validateActionsVerbs(actions, []string{"list:secrets"}); err != nil { + t.Error(err) + } + if count != 100 { + t.Errorf("workerFunc haven't seen 100 only %d", count) + } + }, + resources: func() []runtime.Object { + ret := []runtime.Object{} + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}) + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "ns1"}}) + ret = append(ret, createSecrets(100)...) 
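+				// createSecrets (defined at the bottom of this file) fabricates
+				// `count` dummy secrets in namespace "ns2"; the worker should see
+				// exactly those 100 items, while the two config maps are never
+				// listed for the secrets GVR.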
+ return ret + }(), + gvr: schema.GroupResource{Resource: "secrets"}.WithVersion("v1"), + }, + + // scenario 2: + { + name: "handles panic", + workerFunc: func(obj *unstructured.Unstructured) error { + panic("nasty panic") + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, count int, err error) { + if err == nil { + t.Error("expected to receive an error") + } + if err := validateActionsVerbs(actions, []string{"list:secrets"}); err != nil { + t.Error(err) + } + }, + resources: func() []runtime.Object { + ret := []runtime.Object{} + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}) + ret = append(ret, createSecrets(100)...) + return ret + }(), + gvr: schema.GroupResource{Resource: "secrets"}.WithVersion("v1"), + }, + + // scenario 3: + { + name: "handles more than one page (default is 500 items)", + workerFunc: func(obj *unstructured.Unstructured) error { + if obj.GetKind() != "Secret" { + return fmt.Errorf("incorrect kind %v", obj.GetKind()) + } + return nil + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, count int, err error) { + if err != nil { + t.Error(err) + } + if err := validateActionsVerbs(actions, []string{"list:secrets"}); err != nil { + t.Error(err) + } + if count != 500*4 { + t.Errorf("workerFunc haven't seen all 500 * 4 only %d", count) + } + }, + resources: func() []runtime.Object { + ret := []runtime.Object{} + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}) + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "ns1"}}) + ret = append(ret, createSecrets(500*4)...) + return ret + }(), + gvr: schema.GroupResource{Resource: "secrets"}.WithVersion("v1"), + }, + + // scenario 4: + { + name: "handles an empty list", + workerFunc: func(obj *unstructured.Unstructured) error { + return fmt.Errorf("an empty list passed but received %v", obj) + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, count int, err error) { + if err != nil { + t.Error(err) + } + if err := validateActionsVerbs(actions, []string{"list:secrets"}); err != nil { + t.Error(err) + } + if count != 0 { + t.Errorf("workerFunc seen %d object", count) + } + }, + resources: func() []runtime.Object { + ret := []runtime.Object{} + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}) + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "ns1"}}) + return ret + }(), + gvr: schema.GroupResource{Resource: "secrets"}.WithVersion("v1"), + }, + + // scenario 5: + { + name: "stops further processing on worker error", + workerFunc: func(obj *unstructured.Unstructured) error { + if obj.GetKind() != "Secret" { + return fmt.Errorf("incorrect kind %v", obj.GetKind()) + } + return fmt.Errorf("fake error for %v", obj.GetName()) + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, count int, err error) { + if err == nil { + t.Error("expected to receive an error but none was returned") + } + if err := validateActionsVerbs(actions, []string{"list:secrets"}); err != nil { + t.Error(err) + } + // it is hard to give an exact number because we don't know how many workers are progressing + // mainly due to propagation time (closing `onWorkerErrorCtx` which propagates the stop signal to `workCh`) + if count >= 30 { + t.Errorf("workerFunc shouldn't have processed >= %d items, expected < 30 ", count) + } + }, + resources: func() 
[]runtime.Object { + ret := []runtime.Object{} + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}) + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: "ns1"}}) + ret = append(ret, createSecrets(500*4)...) + return ret + }(), + gvr: schema.GroupResource{Resource: "secrets"}.WithVersion("v1"), + }, + } + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // prepare + scheme := runtime.NewScheme() + unstructuredObjs := []runtime.Object{} + for _, rawObject := range scenario.resources { + rawUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(rawObject.DeepCopyObject()) + if err != nil { + t.Fatal(err) + } + unstructured.SetNestedField(rawUnstructured, "v1", "apiVersion") + unstructured.SetNestedField(rawUnstructured, reflect.TypeOf(rawObject).Elem().Name(), "kind") + unstructuredObjs = append(unstructuredObjs, &unstructured.Unstructured{Object: rawUnstructured}) + } + dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, unstructuredObjs...) + + // act + totalCountCh := make(chan int) + listProcessor := newListProcessor(context.TODO(), dynamicClient, func(obj *unstructured.Unstructured) error { + totalCountCh <- 1 + return scenario.workerFunc(obj) + }) + + // validate + totalCount := 0 + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + defer wg.Done() + for i := range totalCountCh { + totalCount += i + } + }() + + err := listProcessor.run(scenario.gvr) + close(totalCountCh) + wg.Wait() + scenario.validateFunc(t, dynamicClient.Actions(), totalCount, err) + }) + } +} + +func TestInprocessProcessorContextCancellation(t *testing.T) { + // prepare + ctx, cancelCtxFn := context.WithCancel(context.TODO()) + lock := sync.Mutex{} + + workerFunc := func(obj *unstructured.Unstructured) error { + lock.Lock() + defer lock.Unlock() + time.Sleep(100 * time.Millisecond) + cancelCtxFn() + return nil + } + + resources := func() []runtime.Object { + ret := []runtime.Object{} + ret = append(ret, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}) + ret = append(ret, createSecrets(500*4)...) + return ret + }() + gvr := schema.GroupResource{Resource: "secrets"}.WithVersion("v1") + + scheme := runtime.NewScheme() + unstructuredObjs := []runtime.Object{} + for _, rawObject := range resources { + rawUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(rawObject.DeepCopyObject()) + if err != nil { + t.Fatal(err) + } + unstructured.SetNestedField(rawUnstructured, "v1", "apiVersion") + unstructured.SetNestedField(rawUnstructured, reflect.TypeOf(rawObject).Elem().Name(), "kind") + unstructuredObjs = append(unstructuredObjs, &unstructured.Unstructured{Object: rawUnstructured}) + } + dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, unstructuredObjs...) 
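+	// workerFunc cancels the shared context after the first processed item, so
+	// run() below is expected to fail once cancellation propagates through
+	// workCh; the 6s timeout only guards against the test hanging.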
+ + // act + listProcessor := newListProcessor(ctx, dynamicClient, func(obj *unstructured.Unstructured) error { + return workerFunc(obj) + }) + + // validate + testTimeoutCh := time.After(6 * time.Second) + testCompletedCh := make(chan bool) + defer close(testCompletedCh) + go func() { + err := listProcessor.run(gvr) + if err == nil { + t.Error("expected to receive an error") + } + if err := validateActionsVerbs(dynamicClient.Actions(), []string{"list:secrets"}); err != nil { + t.Error(err) + } + testCompletedCh <- true + }() + + select { + case <-testTimeoutCh: + t.Fatal("timeout waiting for context propagation") + case <-testCompletedCh: + } +} + +func validateActionsVerbs(actualActions []clientgotesting.Action, expectedActions []string) error { + actionString := func(a clientgotesting.Action) string { + return a.GetVerb() + ":" + a.GetResource().Resource + } + actionStrings := func(actions []clientgotesting.Action) []string { + res := make([]string, 0, len(actions)) + for _, a := range actions { + res = append(res, actionString(a)) + } + return res + } + + if len(actualActions) != len(expectedActions) { + return fmt.Errorf("expected to get %d actions but got %d\nexpected=%v \n got=%v", len(expectedActions), len(actualActions), expectedActions, actionStrings(actualActions)) + } + for i, a := range actualActions { + if got, expected := actionString(a), expectedActions[i]; got != expected { + return fmt.Errorf("at %d got %s, expected %s", i, got, expected) + } + } + return nil +} + +func createSecrets(count int) []runtime.Object { + ret := make([]runtime.Object, count) + for i := 0; i < count; i++ { + ret[i] = &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("secret%d", i), Namespace: "ns2"}} + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_test.go new file mode 100644 index 00000000000..c601fc65172 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/inprocess_test.go @@ -0,0 +1,241 @@ +package migrators + +import ( + "fmt" + "reflect" + "testing" + "time" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/discovery" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" +) + +func TestInProcessMigrator(t *testing.T) { + apiResources := []metav1.APIResource{ + { + Name: "secrets", + Namespaced: true, + Group: "", + Version: "v1", + }, + { + Name: "configmaps", + Namespaced: true, + Group: "", + Version: "v1", + }, + } + grs := []schema.GroupResource{ + {Resource: "configmaps"}, + {Resource: "secrets"}, + } + + tests := []struct { + name string + resources []runtime.Object + }{ + { + name: "no resources", + resources: nil, + }, + { + name: "secrets and configmaps", + resources: []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: "ns1"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secret2", Namespace: "ns2"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "secret1", 
Namespace: "ns1"}}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeKubeClient := fake.NewSimpleClientset() + + scheme := runtime.NewScheme() + unstructuredObjs := []runtime.Object{} + for _, rawObject := range tt.resources { + rawUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(rawObject.DeepCopyObject()) + if err != nil { + t.Fatal(err) + } + unstructured.SetNestedField(rawUnstructured, "v1", "apiVersion") + unstructured.SetNestedField(rawUnstructured, reflect.TypeOf(rawObject).Elem().Name(), "kind") + unstructuredObjs = append(unstructuredObjs, &unstructured.Unstructured{Object: rawUnstructured}) + } + dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, unstructuredObjs...) + + discoveryClient := &fakeDisco{ + delegate: fakeKubeClient.Discovery(), + serverPreferredRes: []*metav1.APIResourceList{ + { + TypeMeta: metav1.TypeMeta{}, + APIResources: apiResources, + }, + }, + } + + handler := &fakeHandler{} + + m := NewInProcessMigrator(dynamicClient, discoveryClient) + m.AddEventHandler(handler) + + t.Logf("Pruning non-existing migration") + if err := m.PruneMigration(schema.GroupResource{Resource: "configmaps"}); err != nil { + t.Errorf("unexpected prune error: %v", err) + } + + t.Logf("Migrating configmaps") + err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) { + allFinished := true + for _, gr := range grs { + finished, result, _, err := m.EnsureMigration(gr, "1") + if err != nil { + return false, err + } + if finished && result != nil { + return false, fmt.Errorf("unexpected non-nil result: %v", err) + } + if !finished && result != nil { + return false, fmt.Errorf("result must be nil if not finished, but got: %v", err) + } + if !finished { + allFinished = false + } + } + return allFinished, nil + }) + if err != nil { + t.Fatalf("unexpected ensure error: %v", err) + } + + if reflect.DeepEqual(handler.calls, []string{"update"}) { + t.Errorf("expected handler update call when finished, but got: %v", handler.calls) + } + + t.Logf("Pruning finished migration") + if err := m.PruneMigration(schema.GroupResource{Resource: "configmaps"}); err != nil { + t.Errorf("unexpected prune error: %v", err) + } + + validateMigratedResources(t, dynamicClient.Actions(), unstructuredObjs, grs) + }) + } +} + +func validateMigratedResources(ts *testing.T, actions []clientgotesting.Action, unstructuredObjs []runtime.Object, targetGRs []schema.GroupResource) { + ts.Helper() + + expectedActionsNoList := len(actions) - len(targetGRs) // subtract "list" requests + if expectedActionsNoList != len(unstructuredObjs) { + ts.Fatalf("incorrect number of resources were encrypted, expected %d, got %d", len(unstructuredObjs), expectedActionsNoList) + } + + // validate LIST requests + { + validatedListRequests := 0 + for _, gr := range targetGRs { + for _, action := range actions { + if action.Matches("list", gr.Resource) { + validatedListRequests++ + break + } + } + } + if validatedListRequests != len(targetGRs) { + ts.Fatalf("incorrect number of LIST request, expedted %d, got %d", len(targetGRs), validatedListRequests) + } + } + + // validate UPDATE requests + for _, action := range actions { + if action.GetVerb() == "update" { + unstructuredObjValidated := false + + updateAction := action.(clientgotesting.UpdateAction) + updatedObj := updateAction.GetObject().(*unstructured.Unstructured) + for _, rawUnstructuredObj := range unstructuredObjs { + expectedUnstructuredObj, ok := 
rawUnstructuredObj.(*unstructured.Unstructured) + if !ok { + ts.Fatalf("object %T is not *unstructured.Unstructured", expectedUnstructuredObj) + } + if equality.Semantic.DeepEqual(updatedObj, expectedUnstructuredObj) { + unstructuredObjValidated = true + break + } + } + + if !unstructuredObjValidated { + ts.Fatalf("encrypted object with kind = %s, namespace = %s and name = %s wasn't expected to be encrypted", updatedObj.GetKind(), updatedObj.GetNamespace(), updatedObj.GetName()) + } + } + } +} + +type fakeHandler struct { + calls []string +} + +func (h *fakeHandler) OnAdd(obj interface{}) { + h.calls = append(h.calls, "add") +} + +func (h *fakeHandler) OnUpdate(oldObj, newObj interface{}) { + h.calls = append(h.calls, "update") +} + +func (h *fakeHandler) OnDelete(obj interface{}) { + h.calls = append(h.calls, "delete") +} + +type fakeDisco struct { + delegate discovery.DiscoveryInterface + serverPreferredRes []*metav1.APIResourceList +} + +func (f *fakeDisco) RESTClient() interface{} { + return f.delegate +} + +func (f *fakeDisco) ServerGroups() (*metav1.APIGroupList, error) { + return f.delegate.ServerGroups() +} + +func (f *fakeDisco) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + return f.delegate.ServerResourcesForGroupVersion(groupVersion) +} + +func (f *fakeDisco) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return f.delegate.ServerGroupsAndResources() +} + +func (f *fakeDisco) ServerResources() ([]*metav1.APIResourceList, error) { + return f.delegate.ServerResources() +} + +func (f *fakeDisco) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return f.serverPreferredRes, nil +} + +func (f *fakeDisco) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return f.delegate.ServerPreferredNamespacedResources() +} + +func (f *fakeDisco) ServerVersion() (*version.Info, error) { + return f.delegate.ServerVersion() +} + +func (f *fakeDisco) OpenAPISchema() (*openapi_v2.Document, error) { + return f.delegate.OpenAPISchema() +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/kubestorageversionmigrator.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/kubestorageversionmigrator.go new file mode 100644 index 00000000000..041f8eac810 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/kubestorageversionmigrator.go @@ -0,0 +1,112 @@ +package migrators + +import ( + "fmt" + "time" + + migrationv1alpha1 "github.com/kubernetes-sigs/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + kubemigratorclient "github.com/kubernetes-sigs/kube-storage-version-migrator/pkg/clients/clientset" + migrationv1alpha1informer "github.com/kubernetes-sigs/kube-storage-version-migrator/pkg/clients/informer/migration/v1alpha1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/tools/cache" +) + +const writeKeyAnnotationKey = "encryption.apiserver.operator.openshift.io/write-key" + +func NewKubeStorageVersionMigrator(client kubemigratorclient.Interface, informer migrationv1alpha1informer.Interface, discoveryClient discovery.ServerResourcesInterface) *KubeStorageVersionMigrator { + return &KubeStorageVersionMigrator{ + discoveryClient: discoveryClient, + client: client, + informer: informer, + } 
+} + +// KubeStorageVersionMigrator runs migration through the kube-storage-version-migrator components, +// driven by CustomResources. +type KubeStorageVersionMigrator struct { + discoveryClient discovery.ServerResourcesInterface + client kubemigratorclient.Interface + informer migrationv1alpha1informer.Interface +} + +func (m *KubeStorageVersionMigrator) EnsureMigration(gr schema.GroupResource, writeKey string) (finished bool, result error, ts time.Time, err error) { + name := migrationResourceName(gr) + if migration, err := m.informer.StorageVersionMigrations().Lister().Get(name); err != nil && !errors.IsNotFound(err) { + return false, nil, time.Time{}, err + } else if err == nil && migration.Annotations[writeKeyAnnotationKey] == writeKey { + for _, c := range migration.Status.Conditions { + switch c.Type { + case migrationv1alpha1.MigrationSucceeded: + if c.Status == corev1.ConditionTrue { + return true, nil, c.LastUpdateTime.Time, nil + } + case migrationv1alpha1.MigrationFailed: + if c.Status == corev1.ConditionTrue { + return true, fmt.Errorf("migration of %s for key %q failed: %s", gr, writeKey, c.Message), c.LastUpdateTime.Time, nil + } + } + } + return false, nil, time.Time{}, nil + } else if err == nil { + if err := m.client.MigrationV1alpha1().StorageVersionMigrations().Delete(name, &metav1.DeleteOptions{ + Preconditions: &metav1.Preconditions{ResourceVersion: &migration.ResourceVersion}, + }); err != nil && !errors.IsNotFound(err) { + return false, nil, time.Time{}, err + } + } + + v, err := preferredResourceVersion(m.discoveryClient, gr) + if err != nil { + return false, nil, time.Time{}, err + } + + _, err = m.client.MigrationV1alpha1().StorageVersionMigrations().Create(&migrationv1alpha1.StorageVersionMigration{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: map[string]string{ + writeKeyAnnotationKey: writeKey, + }, + }, + Spec: migrationv1alpha1.StorageVersionMigrationSpec{ + Resource: migrationv1alpha1.GroupVersionResource{ + Group: gr.Group, + Version: v, + Resource: gr.Resource, + }, + }, + }) + + return false, nil, time.Time{}, err +} + +func (m *KubeStorageVersionMigrator) PruneMigration(gr schema.GroupResource) error { + name := migrationResourceName(gr) + if err := m.client.MigrationV1alpha1().StorageVersionMigrations().Delete(name, &metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) { + return err + } + return nil +} + +func (m *KubeStorageVersionMigrator) AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced { + informer := m.informer.StorageVersionMigrations().Informer() + + informer.AddEventHandler(handler) + + return []cache.InformerSynced{informer.HasSynced} +} + +func migrationResourceName(gr schema.GroupResource) string { + return fmt.Sprintf("encryption-migration-%s-%s", groupToHumanReadable(gr), gr.Resource) +} + +func groupToHumanReadable(gr schema.GroupResource) string { + group := gr.Group + if len(group) == 0 { + group = "core" + } + return group +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/metrics.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/metrics.go new file mode 100644 index 00000000000..54a2e8d1c1b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/metrics.go @@ -0,0 +1,93 @@ +package migrators + +import ( + "github.com/prometheus/client_golang/prometheus" + + k8smetrics "k8s.io/component-base/metrics" + 
"k8s.io/component-base/metrics/legacyregistry" +) + +const ( + namespace = "storage_migrator" + subsystem = "core_migrator" +) + +// metrics provides access to all core migrator metrics. +var metrics *migratorMetrics + +func init() { + metrics = newMigratorMetrics(legacyregistry.Register) +} + +// migratorMetrics instruments core migrator with prometheus metrics. +type migratorMetrics struct { + objectsMigrated *k8smetrics.CounterVec + migration *k8smetrics.CounterVec + migrationDuration *k8smetrics.HistogramVec +} + +// newMigratorMetrics create a new MigratorMetrics, configured with default metric names. +func newMigratorMetrics(registerFunc func(k8smetrics.Registerable) error) *migratorMetrics { + // objectMigrates is defined in kube-storave-version-migrator + objectsMigrated := k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "migrated_objects", + Help: "The total number of objects that have been migrated, labeled with the full resource name", + }, []string{"resource"}) + registerFunc(objectsMigrated) + + // migration is defined in kube-storave-version-migrator + migration := k8smetrics.NewCounterVec( + &k8smetrics.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "migrations", + Help: "The total number of completed migration, labeled with the full resource name, and the status of the migration (failed or succeeded)", + }, []string{"resource", "status"}) + registerFunc(migration) + + // migrationDuration is not defined upstream but uses the same Namespace and Subsystem + // as the other metrics that are defined in kube-storave-version-migrator + migrationDuration := k8smetrics.NewHistogramVec( + &k8smetrics.HistogramOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "migration_duration_seconds", + Help: "How long a successful migration takes in seconds, labeled with the full resource name", + Buckets: prometheus.ExponentialBuckets(120, 2, 7), + }, []string{"resource"}) + registerFunc(migrationDuration) + + return &migratorMetrics{ + objectsMigrated: objectsMigrated, + migration: migration, + migrationDuration: migrationDuration, + } +} + +func (m *migratorMetrics) Reset() { + m.objectsMigrated.Reset() + m.migration.Reset() +} + +// ObserveObjectsMigrated adds the number of migrated objects for a resource type +func (m *migratorMetrics) ObserveObjectsMigrated(added int, resource string) { + m.objectsMigrated.WithLabelValues(resource).Add(float64(added)) +} + +// ObserveSucceededMigration increments the number of successful migrations for a resource type +func (m *migratorMetrics) ObserveSucceededMigration(resource string) { + m.migration.WithLabelValues(resource, "Succeeded").Add(float64(1)) +} + +// ObserveFailedMigration increments the number of failed migrations for a resource type +func (m *migratorMetrics) ObserveFailedMigration(resource string) { + m.migration.WithLabelValues(resource, "Failed").Add(float64(1)) +} + +// ObserveMigrationDuration records migration duration in seconds for a resource type +func (m *migratorMetrics) ObserveSucceededMigrationDuration(seconds float64, resource string) { + m.migrationDuration.WithLabelValues(resource).Observe(seconds) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/types.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/types.go new file mode 100644 index 00000000000..c6ffcf336fa --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators/types.go @@ -0,0 +1,27 @@ +package migrators + +import ( + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" +) + +// Migrator is a resource migration mechanism. +type Migrator interface { + // EnsureMigration starts a migration if it does not exist. If a migration of + // the same write-key exists and is finished (with or without error), nothing happens. + // If a migration of another key exists, that migration is deleted first before + // starting a new one. This function is idempotent as long as a running or finished + // migration is not pruned. + // If finished is true, result is the result of the migration, with nil meaning that it + // finished successfully. The timestamp shows when it finished. + EnsureMigration(gr schema.GroupResource, writeKey string) (finished bool, result error, ts time.Time, err error) + // PruneMigration removes a migration, regardless of whether it is running or finished, + // successfully or with an error. If there is no migration, this must not return an error. + PruneMigration(gr schema.GroupResource) error + + // AddEventHandler registers an event handler that fires whenever resources change + // in a way that might influence the result of EnsureMigration. + AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/prune_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/prune_controller.go new file mode 100644 index 00000000000..a10073d17dc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/prune_controller.go @@ -0,0 +1,236 @@ +package controllers + +import ( + "fmt" + "sort" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/encryption/secrets" + "github.com/openshift/library-go/pkg/operator/encryption/state" + "github.com/openshift/library-go/pkg/operator/encryption/statemachine" + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + pruneWorkKey = "key" + keepNumberOfSecrets = 10 +) + +// pruneController prevents an unbounded growth of old encryption keys. +// For a given resource, if there are more than ten keys which have been migrated, +// this controller will delete the oldest migrated keys until there are ten migrated +// keys total. These keys are safe to delete since no data in etcd is encrypted using +// them. Keeping a small number of old keys around is meant to help facilitate +// decryption of old backups (and as a general precaution).
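+// +// A minimal wiring sketch (editorial illustration, not part of the original change; deployer, operatorClient, kubeInformers, kubeClient, recorder and stopCh are assumptions standing in for the operator's own objects): +// +//   selector := metav1.ListOptions{LabelSelector: "encryption.apiserver.operator.openshift.io/component=kms"} +//   pruner := NewPruneController(deployer, operatorClient, kubeInformers, kubeClient.CoreV1(), selector, recorder, []schema.GroupResource{{Group: "", Resource: "secrets"}}) +//   go pruner.Run(stopCh) +// +// On every informer event the controller re-evaluates the key secrets and prunes unused, migrated keys beyond the ten most recent ones.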
+type pruneController struct { + operatorClient operatorv1helpers.OperatorClient + + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder + + preRunCachesSynced []cache.InformerSynced + + encryptedGRs []schema.GroupResource + + encryptionSecretSelector metav1.ListOptions + + deployer statemachine.Deployer + secretClient corev1client.SecretsGetter +} + +func NewPruneController( + deployer statemachine.Deployer, + operatorClient operatorv1helpers.OperatorClient, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + secretClient corev1client.SecretsGetter, + encryptionSecretSelector metav1.ListOptions, + eventRecorder events.Recorder, + encryptedGRs []schema.GroupResource, +) *pruneController { + c := &pruneController{ + operatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "EncryptionPruneController"), + eventRecorder: eventRecorder.WithComponentSuffix("encryption-prune-controller"), // TODO unused + + encryptedGRs: encryptedGRs, + + encryptionSecretSelector: encryptionSecretSelector, + deployer: deployer, + secretClient: secretClient, + } + + c.preRunCachesSynced = setUpInformers(deployer, operatorClient, kubeInformersForNamespaces, c.eventHandler()) + + return c +} + +func (c *pruneController) sync() error { + if ready, err := shouldRunEncryptionController(c.operatorClient); err != nil || !ready { + return err // we will get re-kicked when the operator status updates + } + + configError := c.deleteOldMigratedSecrets() + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: "EncryptionPruneControllerDegraded", + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +func (c *pruneController) deleteOldMigratedSecrets() error { + _, desiredEncryptionConfig, _, isProgressingReason, err := statemachine.GetEncryptionConfigAndState(c.deployer, c.secretClient, c.encryptionSecretSelector, c.encryptedGRs) + if err != nil { + return err + } + if len(isProgressingReason) > 0 { + c.queue.AddAfter(migrationWorkKey, 2*time.Minute) + return nil + } + + allUsedKeys := make([]state.KeyState, 0, len(desiredEncryptionConfig)) + for _, grKeys := range desiredEncryptionConfig { + allUsedKeys = append(allUsedKeys, grKeys.ReadKeys...) 
+ } + + allSecrets, err := c.secretClient.Secrets("openshift-config-managed").List(c.encryptionSecretSelector) + if err != nil { + return err + } + + // sort by keyID + encryptionSecrets := make([]*corev1.Secret, 0, len(allSecrets.Items)) + for _, s := range allSecrets.Items { + encryptionSecrets = append(encryptionSecrets, s.DeepCopy()) // don't use &s because it is constant through-out the loop + } + sort.Slice(encryptionSecrets, func(i, j int) bool { + iKeyID, _ := state.NameToKeyID(encryptionSecrets[i].Name) + jKeyID, _ := state.NameToKeyID(encryptionSecrets[j].Name) + return iKeyID > jKeyID + }) + + var deleteErrs []error + skippedKeys := 0 + deletedKeys := 0 +NextEncryptionSecret: + for _, s := range encryptionSecrets { + k, err := secrets.ToKeyState(s) + if err == nil { + // ignore invalid keys, check whether secret is used + for _, us := range allUsedKeys { + if state.EqualKeyAndEqualID(&us, &k) { + continue NextEncryptionSecret + } + } + } + + // skip the most recent unused secrets around + if skippedKeys < keepNumberOfSecrets { + skippedKeys++ + continue + } + + // any secret that isn't a read key isn't used. just delete them. + // two phase delete: finalizer, then delete + + // remove our finalizer if it is present + secret := s.DeepCopy() + if finalizers := sets.NewString(secret.Finalizers...); finalizers.Has(secrets.EncryptionSecretFinalizer) { + delete(finalizers, secrets.EncryptionSecretFinalizer) + secret.Finalizers = finalizers.List() + var updateErr error + secret, updateErr = c.secretClient.Secrets("openshift-config-managed").Update(secret) + deleteErrs = append(deleteErrs, updateErr) + if updateErr != nil { + continue + } + } + + // remove the actual secret + if err := c.secretClient.Secrets("openshift-config-managed").Delete(secret.Name, nil); err != nil { + deleteErrs = append(deleteErrs, err) + } else { + deletedKeys++ + klog.V(2).Infof("Successfully pruned secret %s/%s", secret.Namespace, secret.Name) + } + } + if deletedKeys > 0 { + c.eventRecorder.Eventf("EncryptionKeysPruned", "Successfully pruned %d secrets", deletedKeys) + } + return utilerrors.FilterOut(utilerrors.NewAggregate(deleteErrs), errors.IsNotFound) +} + +func (c *pruneController) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting EncryptionPruneController") + defer klog.Infof("Shutting down EncryptionPruneController") + if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...) 
{ + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // only start one worker + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *pruneController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *pruneController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *pruneController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(pruneWorkKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(pruneWorkKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(pruneWorkKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/prune_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/prune_controller_test.go new file mode 100644 index 00000000000..34df469e912 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/prune_controller_test.go @@ -0,0 +1,255 @@ +package controllers + +import ( + "crypto/rand" + "encoding/base64" + "fmt" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + + operatorv1 "github.com/openshift/api/operator/v1" + encryptiondeployer "github.com/openshift/library-go/pkg/operator/encryption/deployer" + "github.com/openshift/library-go/pkg/operator/encryption/secrets" + "github.com/openshift/library-go/pkg/operator/encryption/state" + encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func TestPruneController(t *testing.T) { + scenarios := []struct { + name string + initialSecrets []*corev1.Secret + encryptionSecretSelector metav1.ListOptions + targetNamespace string + targetGRs []schema.GroupResource + // expectedActions holds actions to be verified in the form of "verb:resource:namespace" + expectedActions []string + expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration + validateFunc func(ts *testing.T, actions []clientgotesting.Action, initialSecrets []*corev1.Secret) + }{ + { + name: "no-op only 10 keys were migrated", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialSecrets: func() []*corev1.Secret { + ns := "kms" + all := []*corev1.Secret{} + all = append(all, createMigratedEncryptionKeySecretsWithRndKey(t, 10, ns, "secrets")...) 
+ all = append(all, encryptiontesting.CreateEncryptionKeySecretWithRawKey(ns, nil, 11, []byte("cfbbae883984944e48d25590abdfd300"))) + return all + }(), + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "list:secrets:openshift-config-managed"}, + }, + + { + name: "15 keys were migrated, 2 of them are used, 10 are kept, the 3 oldest are pruned", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialSecrets: createMigratedEncryptionKeySecretsWithRndKey(t, 15, "kms", "secrets"), + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "delete:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "delete:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "delete:secrets:openshift-config-managed", + "create:events:kms", + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, initialSecrets []*corev1.Secret) { + validateSecretsWerePruned(ts, actions, initialSecrets[:3]) + }, + }, + + { + name: "no-op the migrated keys don't match the selector", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialSecrets: func() []*corev1.Secret { + return createMigratedEncryptionKeySecretsWithRndKey(t, 15, "not-kms", "secrets") + }(), + encryptionSecretSelector: metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", "encryption.apiserver.operator.openshift.io/component", "kms")}, + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "list:secrets:openshift-config-managed", + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // setup + fakeOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + OperatorStatus: operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "EncryptionPruneControllerDegraded", + Status: "False", + }, + }, + }, + NodeStatuses: []operatorv1.NodeStatus{ + {NodeName: "node-1"}, + }, + }, + nil, + nil, + ) + + rawSecrets := []runtime.Object{} + for _, initialSecret := range scenario.initialSecrets { + rawSecrets = append(rawSecrets, initialSecret) + } + + fakePod := encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1") + + writeKeyRaw := []byte("71ea7c91419a68fd1224f88d50316b4e") // NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU= + writeKeyID := uint64(len(scenario.initialSecrets) + 1) + writeKeySecret := encryptiontesting.CreateEncryptionKeySecretWithRawKey(scenario.targetNamespace, nil, writeKeyID, writeKeyRaw) + + initialKeys := []state.KeyState{} + for _, s := range scenario.initialSecrets { + km, err := secrets.ToKeyState(s) + if err != nil { + t.Fatal(err) + } + initialKeys = append(initialKeys, km) + } + + encryptionConfig := func() *corev1.Secret { + additionalReadKeys := state.KeysWithPotentiallyPersistedDataAndNextReadKey(scenario.targetGRs, state.SortRecentFirst(initialKeys)) + var additionalConfigReadKeys []apiserverconfigv1.Key + for _, rk := range additionalReadKeys { + additionalConfigReadKeys = append(additionalConfigReadKeys, apiserverconfigv1.Key{ + Name:
rk.Key.Name, + Secret: rk.Key.Secret, + }) + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{{ + Resource: "secrets", + Keys: append([]apiserverconfigv1.Key{ + { + Name: fmt.Sprintf("%d", writeKeyID), + Secret: base64.StdEncoding.EncodeToString(writeKeyRaw), + }, + }, additionalConfigReadKeys...), + }}) + ec.APIVersion = corev1.SchemeGroupVersion.String() + return createEncryptionCfgSecret(t, "kms", "1", ec) + }() + fakeKubeClient := fake.NewSimpleClientset(append(rawSecrets, writeKeySecret, fakePod, encryptionConfig)...) + eventRecorder := events.NewRecorder(fakeKubeClient.CoreV1().Events(scenario.targetNamespace), "test-encryptionKeyController", &corev1.ObjectReference{}) + // we pass "openshift-config-managed" and $targetNamespace because the controller creates informers for secrets in those namespaces. + // note that the informer factory is not used in the test - it's only needed to create the controller + kubeInformers := v1helpers.NewKubeInformersForNamespaces(fakeKubeClient, "openshift-config-managed", scenario.targetNamespace) + fakeSecretClient := fakeKubeClient.CoreV1() + + deployer, err := encryptiondeployer.NewRevisionLabelPodDeployer("revision", scenario.targetNamespace, kubeInformers, nil, fakeKubeClient.CoreV1(), fakeSecretClient, encryptiondeployer.StaticPodNodeProvider{OperatorClient: fakeOperatorClient}) + if err != nil { + t.Fatal(err) + } + + target := NewPruneController( + deployer, + fakeOperatorClient, + kubeInformers, + fakeSecretClient, + scenario.encryptionSecretSelector, + eventRecorder, + scenario.targetGRs, + ) + + // act + err = target.sync() + + // validate + if err != nil { + t.Fatal(err) + } + if err := encryptiontesting.ValidateActionsVerbs(fakeKubeClient.Actions(), scenario.expectedActions); err != nil { + t.Fatalf("incorrect action(s) detected: %v", err) + } + if scenario.validateFunc != nil { + scenario.validateFunc(t, fakeKubeClient.Actions(), scenario.initialSecrets) + } + }) + } +} + +func validateSecretsWerePruned(ts *testing.T, actions []clientgotesting.Action, expectedDeletedSecrets []*corev1.Secret) { + ts.Helper() + + deletedSecretsCount := 0 + finalizersRemovedCount := 0 + for _, action := range actions { + if action.GetVerb() == "update" { + updateAction := action.(clientgotesting.UpdateAction) + actualSecret := updateAction.GetObject().(*corev1.Secret) + for _, expectedDeletedSecret := range expectedDeletedSecrets { + if expectedDeletedSecret.Name == actualSecret.GetName() { + expectedDeletedSecretsCpy := expectedDeletedSecret.DeepCopy() + expectedDeletedSecretsCpy.Finalizers = []string{} + if equality.Semantic.DeepEqual(actualSecret, expectedDeletedSecretsCpy) { + finalizersRemovedCount++ + break + } + } + } + } + if action.GetVerb() == "delete" { + deleteAction := action.(clientgotesting.DeleteAction) + for _, expectedDeletedSecret := range expectedDeletedSecrets { + if expectedDeletedSecret.Name == deleteAction.GetName() && expectedDeletedSecret.Namespace == deleteAction.GetNamespace() { + deletedSecretsCount++ + } + } + } + } + if deletedSecretsCount != len(expectedDeletedSecrets) { + ts.Errorf("%d key(s) were deleted but %d were expected to be deleted", deletedSecretsCount, len(expectedDeletedSecrets)) + } + if finalizersRemovedCount != len(expectedDeletedSecrets) { + ts.Errorf("expected to see %d finalizers removed but got %d", len(expectedDeletedSecrets), finalizersRemovedCount) + } +} + +func createMigratedEncryptionKeySecretsWithRndKey(ts *testing.T, count int, namespace,
resource string) []*corev1.Secret { + ts.Helper() + rawKey := make([]byte, 32) + if _, err := rand.Read(rawKey); err != nil { + ts.Fatal(err) + } + ret := []*corev1.Secret{} + for i := 1; i <= count; i++ { + s := encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey(namespace, []schema.GroupResource{{Group: "", Resource: resource}}, uint64(i), rawKey, time.Now()) + ret = append(ret, s) + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/state_controller.go new file mode 100644 index 00000000000..b2fd898dedc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/state_controller.go @@ -0,0 +1,270 @@ +package controllers + +import ( + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig" + "github.com/openshift/library-go/pkg/operator/encryption/state" + "github.com/openshift/library-go/pkg/operator/encryption/statemachine" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const stateWorkKey = "key" + +// stateController is responsible for creating a single secret in +// openshift-config-managed with the name destName. This single secret +// contains the complete EncryptionConfiguration that is consumed by the API +// server that is performing the encryption. Thus this secret represents +// the current state of all resources in encryptedGRs. Every encryption key +// that matches encryptionSecretSelector is included in this final secret. +// This secret is synced into targetNamespace at a static location. This +// indirection allows the cluster to recover from the deletion of targetNamespace. +// See getResourceConfigs for details on how the raw state of all keys +// is converted into a single encryption config. The logic for determining +// the current write key is of special interest. 
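+// +// A minimal wiring sketch (editorial illustration, not part of the original change; the surrounding variables - deployer, operatorClient, kubeInformers, kubeClient, selector, recorder, stopCh - are assumptions standing in for the operator's own objects): +// +//   stateCtrl := NewStateController("kms", deployer, operatorClient, kubeInformers, kubeClient.CoreV1(), selector, recorder, []schema.GroupResource{{Group: "", Resource: "secrets"}}) +//   go stateCtrl.Run(stopCh) +// +// With component "kms" the controller maintains the secret openshift-config-managed/encryption-config-kms, which is the name the tests below assert on.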
+type stateController struct { + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder + preRunCachesSynced []cache.InformerSynced + + encryptedGRs []schema.GroupResource + component string + encryptionSecretSelector metav1.ListOptions + + operatorClient operatorv1helpers.OperatorClient + secretClient corev1client.SecretsGetter + deployer statemachine.Deployer +} + +func NewStateController( + component string, + deployer statemachine.Deployer, + operatorClient operatorv1helpers.OperatorClient, + kubeInformersForNamespaces operatorv1helpers.KubeInformersForNamespaces, + secretClient corev1client.SecretsGetter, + encryptionSecretSelector metav1.ListOptions, + eventRecorder events.Recorder, + encryptedGRs []schema.GroupResource, +) *stateController { + c := &stateController{ + operatorClient: operatorClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "EncryptionStateController"), + eventRecorder: eventRecorder.WithComponentSuffix("encryption-state-controller"), + + encryptedGRs: encryptedGRs, + component: component, + + encryptionSecretSelector: encryptionSecretSelector, + secretClient: secretClient, + deployer: deployer, + } + + c.preRunCachesSynced = setUpInformers(deployer, operatorClient, kubeInformersForNamespaces, c.eventHandler()) + + return c +} + +func (c *stateController) sync() error { + if ready, err := shouldRunEncryptionController(c.operatorClient); err != nil || !ready { + return err // we will get re-kicked when the operator status updates + } + + configError := c.generateAndApplyCurrentEncryptionConfigSecret() + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: "EncryptionStateControllerDegraded", + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := operatorv1helpers.UpdateStatus(c.operatorClient, operatorv1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +type eventWithReason struct { + reason string + message string +} + +func (c *stateController) generateAndApplyCurrentEncryptionConfigSecret() error { + currentConfig, desiredEncryptionState, encryptionSecrets, transitioningReason, err := statemachine.GetEncryptionConfigAndState(c.deployer, c.secretClient, c.encryptionSecretSelector, c.encryptedGRs) + if err != nil { + return err + } + if len(transitioningReason) > 0 { + c.queue.AddAfter(stateWorkKey, 2*time.Minute) + return nil + } + + if currentConfig == nil && len(encryptionSecrets) == 0 { + // we depend on the key controller to create the first key to bootstrap encryption. + // Later-on either the config exists or there are keys, even in the case of disabled + // encryption via the apiserver config. 
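+ // Returning nil without requeueing is safe here: the secret informers registered in setUpInformers will trigger another sync as soon as the key controller creates the first key secret.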
+ return nil + } + + desiredEncryptionConfig := encryptionconfig.FromEncryptionState(desiredEncryptionState) + changed, err := c.applyEncryptionConfigSecret(desiredEncryptionConfig) + if err != nil { + return err + } + + if changed { + currentEncryptionConfig, _ := encryptionconfig.ToEncryptionState(currentConfig, encryptionSecrets) + if actionEvents := eventsFromEncryptionConfigChanges(currentEncryptionConfig, desiredEncryptionState); len(actionEvents) > 0 { + for _, event := range actionEvents { + c.eventRecorder.Eventf(event.reason, event.message) + } + } + } + return nil +} + +func (c *stateController) applyEncryptionConfigSecret(encryptionConfig *apiserverconfigv1.EncryptionConfiguration) (bool, error) { + s, err := encryptionconfig.ToSecret("openshift-config-managed", fmt.Sprintf("%s-%s", encryptionconfig.EncryptionConfSecretName, c.component), encryptionConfig) + if err != nil { + return false, err + } + + _, changed, applyErr := resourceapply.ApplySecret(c.secretClient, c.eventRecorder, s) + return changed, applyErr +} + +func (c *stateController) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting EncryptionStateController") + defer klog.Infof("Shutting down EncryptionStateController") + if !cache.WaitForCacheSync(stopCh, c.preRunCachesSynced...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync for EncryptionStateController")) + return + } + + // only start one worker + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *stateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *stateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *stateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(stateWorkKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(stateWorkKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(stateWorkKey) }, + } +} + +// eventsFromEncryptionConfigChanges returns a slice of event reasons with messages describing the differences between the current and desired encryption state.
+func eventsFromEncryptionConfigChanges(current, desired map[schema.GroupResource]state.GroupResourceState) []eventWithReason { + var result []eventWithReason + // handle removals from current first + for currentGroupResource := range current { + if _, exists := desired[currentGroupResource]; !exists { + result = append(result, eventWithReason{ + reason: "EncryptionResourceRemoved", + message: fmt.Sprintf("Resource %q was removed from encryption config", currentGroupResource), + }) + } + } + for desiredGroupResource, desiredGroupResourceState := range desired { + currentGroupResource, exists := current[desiredGroupResource] + if !exists { + keyMessage := "without write key" + if desiredGroupResourceState.HasWriteKey() { + keyMessage = fmt.Sprintf("with write key %q", desiredGroupResourceState.WriteKey.Key.Name) + } + result = append(result, eventWithReason{ + reason: "EncryptionResourceAdded", + message: fmt.Sprintf("Resource %q was added to encryption config %s", desiredGroupResource, keyMessage), + }) + continue + } + if !currentGroupResource.HasWriteKey() && desiredGroupResourceState.HasWriteKey() { + result = append(result, eventWithReason{ + reason: "EncryptionKeyPromoted", + message: fmt.Sprintf("Promoting key %q for resource %q to write key", desiredGroupResourceState.WriteKey.Key.Name, desiredGroupResource), + }) + } + if currentGroupResource.HasWriteKey() && !desiredGroupResourceState.HasWriteKey() { + result = append(result, eventWithReason{ + reason: "EncryptionKeyRemoved", + message: fmt.Sprintf("Removing write key %q for resource %q", currentGroupResource.WriteKey.Key.Name, desiredGroupResource), + }) + } + if currentGroupResource.HasWriteKey() && desiredGroupResourceState.HasWriteKey() { + if currentGroupResource.WriteKey.ExternalReason != desiredGroupResourceState.WriteKey.ExternalReason { + result = append(result, eventWithReason{ + reason: "EncryptionWriteKeyTriggeredExternal", + message: fmt.Sprintf("Triggered key %q for resource %q because %s", currentGroupResource.WriteKey.Key.Name, desiredGroupResource, desiredGroupResourceState.WriteKey.ExternalReason), + }) + } + if currentGroupResource.WriteKey.InternalReason != desiredGroupResourceState.WriteKey.InternalReason { + result = append(result, eventWithReason{ + reason: "EncryptionWriteKeyTriggeredInternal", + message: fmt.Sprintf("Triggered key %q for resource %q because %s", currentGroupResource.WriteKey.Key.Name, desiredGroupResource, desiredGroupResourceState.WriteKey.InternalReason), + }) + } + if !state.EqualKeyAndEqualID(&currentGroupResource.WriteKey, &desiredGroupResourceState.WriteKey) { + result = append(result, eventWithReason{ + reason: "EncryptionWriteKeyChanged", + message: fmt.Sprintf("Write key %q for resource %q changed", currentGroupResource.WriteKey.Key.Name, desiredGroupResource), + }) + } + } + if len(currentGroupResource.ReadKeys) != len(desiredGroupResourceState.ReadKeys) { + result = append(result, eventWithReason{ + reason: "EncryptionReadKeysChanged", + message: fmt.Sprintf("Number of read keys for resource %q changed from %d to %d", desiredGroupResource, len(currentGroupResource.ReadKeys), len(desiredGroupResourceState.ReadKeys)), + }) + } + } + return result +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/state_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/state_controller_test.go new file mode 100644 index 00000000000..115f3c8b4fe --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/encryption/controllers/state_controller_test.go @@ -0,0 +1,794 @@ +package controllers + +import ( + "encoding/base64" + "errors" + "fmt" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/diff" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + + operatorv1 "github.com/openshift/api/operator/v1" + encryptiondeployer "github.com/openshift/library-go/pkg/operator/encryption/deployer" + "github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig" + "github.com/openshift/library-go/pkg/operator/encryption/state" + encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/events/eventstesting" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func TestStateController(t *testing.T) { + scenarios := []struct { + name string + initialResources []runtime.Object + encryptionSecretSelector metav1.ListOptions + targetNamespace string + targetGRs []schema.GroupResource + // expectedActions holds actions to be verified in the form of "verb:resource:namespace" + expectedActions []string + expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration + validateFunc func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) + validateOperatorClientFunc func(ts *testing.T, operatorClient v1helpers.OperatorClient) + expectedError error + }{ + // scenario 1: validates if "encryption-config-kms" secret with EncryptionConfiguration in "openshift-config-managed" namespace + // was not created when no secrets with encryption keys are present in that namespace. 
+ { + name: "no secret with EncryptionConfig is created when there are no secrets with the encryption keys", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + }, + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed"}, + }, + + // scenario 2: validates that the "encryption-config-kms" secret with EncryptionConfiguration in the "openshift-config-managed" namespace is created; + // it also checks the content and the order of encryption providers - this test expects identity first and aescbc second + { + name: "secret with EncryptionConfig is created without a write key", + targetNamespace: "kms", + encryptionSecretSelector: metav1.ListOptions{LabelSelector: "encryption.apiserver.operator.openshift.io/component=kms"}, + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 1, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "get:secrets:openshift-config-managed", "create:secrets:openshift-config-managed", "create:events:kms", "create:events:kms"}, + expectedEncryptionCfg: encryptiontesting.CreateEncryptionCfgNoWriteKey("1", "NjFkZWY5NjRmYjk2N2Y1ZDdjNDRhMmFmOGRhYjY4NjU=", "secrets"), + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("create", "secrets") { + createAction := action.(clientgotesting.CreateAction) + actualSecret := createAction.GetObject().(*corev1.Secret) + err := validateSecretWithEncryptionConfig(actualSecret, expectedEncryptionCfg, destName) + if err != nil { + ts.Fatalf("failed to verify the encryption config: %v", err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't created and validated") + } + }, + }, + + // scenario 3 + { + name: "secret with EncryptionConfig is created and it contains a single write key", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 34, []byte("171582a0fcd6c5fdb65cbf5a3e9249d7")), + func() *corev1.Secret { + ec := encryptiontesting.CreateEncryptionCfgNoWriteKey("34", "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", "secrets") + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + return ecs + }(), + }, + expectedEncryptionCfg: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + expectedActions: []string{ 
"list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "create:secrets:openshift-config-managed", + "create:events:kms", + "create:events:kms", + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("create", "secrets") { + createAction := action.(clientgotesting.CreateAction) + actualSecret := createAction.GetObject().(*corev1.Secret) + err := validateSecretWithEncryptionConfig(actualSecret, expectedEncryptionCfg, destName) + if err != nil { + ts.Fatalf("failed to verify the encryption config: %v", err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't created and validated") + } + }, + }, + + // scenario 4 + { + name: "no-op when no key is transitioning", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 34, []byte("171582a0fcd6c5fdb65cbf5a3e9249d7"), time.Now()), + func() *corev1.Secret { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + return ecs + }(), + func() *corev1.Secret { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "openshift-config-managed", "1", ec) + ecs.Name = "encryption-config-kms" + return ecs + }(), + }, + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "get:secrets:openshift-config-managed"}, + }, + + // scenario 5 + { + name: "the key with ID=34 is transitioning (observed as a read key) so it is used as a write key in the EncryptionConfig", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 33, []byte("171582a0fcd6c5fdb65cbf5a3e9249d7")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 34, []byte("dda090c18770163d57d6aaca85f7b3a5")), + func() *corev1.Secret { // encryption config in kms namespace + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "33", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + { + Name: "34", + Secret: "ZGRhMDkwYzE4NzcwMTYzZDU3ZDZhYWNhODVmN2IzYTU=", + }, + }, + } + ec := 
encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + return ecs + }(), + func() *corev1.Secret { // encryption config in openshift-config-managed + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "33", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + { + Name: "34", + Secret: "ZGRhMDkwYzE4NzcwMTYzZDU3ZDZhYWNhODVmN2IzYTU=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "openshift-config-managed", "1", ec) + ecs.Name = "encryption-config-kms" + return ecs + }(), + }, + expectedEncryptionCfg: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "ZGRhMDkwYzE4NzcwMTYzZDU3ZDZhYWNhODVmN2IzYTU=", + }, + { + Name: "33", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "create:events:kms", + "create:events:kms", + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("update", "secrets") { + updateAction := action.(clientgotesting.UpdateAction) + actualSecret := updateAction.GetObject().(*corev1.Secret) + err := validateSecretWithEncryptionConfig(actualSecret, expectedEncryptionCfg, destName) + if err != nil { + ts.Fatalf("failed to verify the encryption config: %v", err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't updated and validated") + } + }, + }, + + // scenario 6 + { + name: "checks if the order of the keys is preserved and that the read keys are pruned - all migrated", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 31, []byte("a1f1b3e36c477d91ea85af0f32358f70")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 32, []byte("42b07b385a0edee268f1ac41cfc53857")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 33, []byte("b0af82240e10c032fd9bbbedd3b5955a")), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 34, []byte("1c06e8517890c8dc44f627905efc86b8"), time.Now()), + func() *corev1.Secret { // encryption config in kms namespace + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: 
"MWMwNmU4NTE3ODkwYzhkYzQ0ZjYyNzkwNWVmYzg2Yjg=", + }, + { + Name: "33", + Secret: "YjBhZjgyMjQwZTEwYzAzMmZkOWJiYmVkZDNiNTk1NWE=", + }, + { + Name: "32", + Secret: "NDJiMDdiMzg1YTBlZGVlMjY4ZjFhYzQxY2ZjNTM4NTc=", + }, + { + Name: "31", + Secret: "YTFmMWIzZTM2YzQ3N2Q5MWVhODVhZjBmMzIzNThmNzA=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + return ecs + }(), + func() *corev1.Secret { // encryption config in openshift-config-managed namespace + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MWMwNmU4NTE3ODkwYzhkYzQ0ZjYyNzkwNWVmYzg2Yjg=", + }, + { + Name: "33", + Secret: "YjBhZjgyMjQwZTEwYzAzMmZkOWJiYmVkZDNiNTk1NWE=", + }, + { + Name: "32", + Secret: "NDJiMDdiMzg1YTBlZGVlMjY4ZjFhYzQxY2ZjNTM4NTc=", + }, + { + Name: "31", + Secret: "YTFmMWIzZTM2YzQ3N2Q5MWVhODVhZjBmMzIzNThmNzA=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "openshift-config-managed", "1", ec) + ecs.Name = "encryption-config-kms" + return ecs + }(), + }, + expectedEncryptionCfg: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MWMwNmU4NTE3ODkwYzhkYzQ0ZjYyNzkwNWVmYzg2Yjg=", + }, + { + Name: "33", + Secret: "YjBhZjgyMjQwZTEwYzAzMmZkOWJiYmVkZDNiNTk1NWE=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "get:secrets:openshift-config-managed", "update:secrets:openshift-config-managed", "create:events:kms", "create:events:kms"}, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("update", "secrets") { + updateAction := action.(clientgotesting.UpdateAction) + actualSecret := updateAction.GetObject().(*corev1.Secret) + err := validateSecretWithEncryptionConfig(actualSecret, expectedEncryptionCfg, destName) + if err != nil { + ts.Fatalf("failed to verify the encryption config: %v", err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't updated and validated") + } + }, + }, + + // scenario 7 + { + name: "checks if the order of the keys is preserved - with a key that is transitioning", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 31, []byte("a1f1b3e36c477d91ea85af0f32358f70")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 32, []byte("42b07b385a0edee268f1ac41cfc53857")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 
33, []byte("b0af82240e10c032fd9bbbedd3b5955a")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 34, []byte("1c06e8517890c8dc44f627905efc86b8")), + func() *corev1.Secret { // encryption config in kms namespace + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "33", + Secret: base64.StdEncoding.EncodeToString([]byte("b0af82240e10c032fd9bbbedd3b5955a")), + }, + { + Name: "34", + Secret: base64.StdEncoding.EncodeToString([]byte("1c06e8517890c8dc44f627905efc86b8")), + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "kms", "1", ec) + return ecs + }(), + func() *corev1.Secret { // encryption config in openshift-config-managed namespace + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "33", + Secret: base64.StdEncoding.EncodeToString([]byte("b0af82240e10c032fd9bbbedd3b5955a")), + }, + { + Name: "34", + Secret: base64.StdEncoding.EncodeToString([]byte("1c06e8517890c8dc44f627905efc86b8")), + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "openshift-config-managed", "1", ec) + ecs.Name = "encryption-config-kms" + return ecs + }(), + }, + expectedEncryptionCfg: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: base64.StdEncoding.EncodeToString([]byte("1c06e8517890c8dc44f627905efc86b8")), + }, + { + Name: "33", + Secret: base64.StdEncoding.EncodeToString([]byte("b0af82240e10c032fd9bbbedd3b5955a")), + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + expectedActions: []string{ + "list:pods:kms", + "get:secrets:kms", + "list:secrets:openshift-config-managed", + "get:secrets:openshift-config-managed", + "update:secrets:openshift-config-managed", + "create:events:kms", + "create:events:kms", + }, + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) { + wasSecretValidated := false + for _, action := range actions { + if action.Matches("update", "secrets") { + updateAction := action.(clientgotesting.UpdateAction) + actualSecret := updateAction.GetObject().(*corev1.Secret) + err := validateSecretWithEncryptionConfig(actualSecret, expectedEncryptionCfg, destName) + if err != nil { + ts.Fatalf("failed to verify the encryption config: %v", err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't updated and validated") + } + }, + }, + + // scenario 8 + // + // BUG: this test simulates deletion of an encryption config in the target ns - the encryption config was backed by a single key secret. + // As a result, a new encryption config is created with a single read key, which effectively means that encryption is (temporarily) turned off. + { + name: "no encryption cfg in the target ns (was deleted)", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + 
encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 34, []byte("171582a0fcd6c5fdb65cbf5a3e9249d7"), time.Now()), + func() *corev1.Secret { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + ecs := createEncryptionCfgSecret(t, "openshift-config-managed", "1", ec) + ecs.Name = "encryption-config-kms" + return ecs + }(), + }, + expectedEncryptionCfg: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + validateFunc: func(ts *testing.T, actions []clientgotesting.Action, destName string, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration) { + // TODO: fix the temporary identity key on config reconstruction in getDesiredEncryptionState + /* + wasSecretValidated := false + for _, action := range actions { + if action.Matches("update", "secrets") { + updateAction := action.(clientgotesting.UpdateAction) + actualSecret := updateAction.GetObject().(*corev1.Secret) + err := validateSecretWithEncryptionConfig(actualSecret, expectedEncryptionCfg, destName) + if err != nil { + ts.Fatalf("failed to verify the encryption config: %v", err) + } + wasSecretValidated = true + break + } + } + if !wasSecretValidated { + ts.Errorf("the secret wasn't created and validated") + } + */ + }, + expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "get:secrets:openshift-config-managed", "update:secrets:openshift-config-managed", "create:events:kms", "create:events:kms"}, + }, + + // scenario 9 + // + // verifies that removing a target GR has no effect - we keep encrypting that GR + { + name: "a user can't stop encrypting config maps", + targetNamespace: "kms", + targetGRs: []schema.GroupResource{ + {Group: "", Resource: "secrets"}, + }, + initialResources: []runtime.Object{ + encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, 34, []byte("171582a0fcd6c5fdb65cbf5a3e9249d7"), time.Now()), + func() *corev1.Secret { // encryption config in kms namespace + keysRes := []encryptiontesting.EncryptionKeysResourceTuple{ +
+						{
+							Resource: "configmaps",
+							Keys: []apiserverconfigv1.Key{
+								{
+									Name:   "34",
+									Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=",
+								},
+							},
+						},
+						{
+							Resource: "secrets",
+							Keys: []apiserverconfigv1.Key{
+								{
+									Name:   "34",
+									Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=",
+								},
+							},
+						},
+					}
+					ec := encryptiontesting.CreateEncryptionCfgWithWriteKey(keysRes)
+					ecs := createEncryptionCfgSecret(t, "openshift-config-managed", "1", ec)
+					ecs.Name = "encryption-config-kms"
+					return ecs
+				}(),
+			},
+			expectedActions: []string{"list:pods:kms", "get:secrets:kms", "list:secrets:openshift-config-managed", "get:secrets:openshift-config-managed"},
+		},
+
+		// scenario 10
+		{
+			name:            "degraded when a pod is in an unknown phase",
+			targetNamespace: "kms",
+			targetGRs: []schema.GroupResource{
+				{Group: "", Resource: "secrets"},
+			},
+			initialResources: []runtime.Object{
+				encryptiontesting.CreateDummyKubeAPIPodInUnknownPhase("kube-apiserver-1", "kms", "node-1"),
+			},
+			expectedActions: []string{"list:pods:kms"},
+			expectedError:   errors.New("failed to get converged static pod revision: api server pod kube-apiserver-1 in unknown phase"),
+			validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) {
+				expectedCondition := operatorv1.OperatorCondition{
+					Type:    "EncryptionStateControllerDegraded",
+					Status:  "True",
+					Reason:  "Error",
+					Message: "failed to get converged static pod revision: api server pod kube-apiserver-1 in unknown phase",
+				}
+				encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, []operatorv1.OperatorCondition{expectedCondition})
+			},
+		},
+
+		// scenario 11
+		{
+			name:            "no-op as an invalid secret is not considered",
+			targetNamespace: "kms",
+			targetGRs: []schema.GroupResource{
+				{Group: "", Resource: "secrets"},
+			},
+			initialResources: []runtime.Object{
+				encryptiontesting.CreateDummyKubeAPIPod("kube-apiserver-1", "kms", "node-1"),
+				func() *corev1.Secret { // encryption config in kms namespace
+					ecs := createEncryptionCfgSecret(t, "kms", "1", &apiserverconfigv1.EncryptionConfiguration{})
+					ecs.Data[encryptionconfig.EncryptionConfSecretName] = []byte{1, 2, 3} // invalid
+					return ecs
+				}(),
+			},
+			expectedActions: []string{"list:pods:kms", "get:secrets:kms"},
+			expectedError:   fmt.Errorf("invalid encryption config kms/encryption-config-1: yaml: control characters are not allowed"),
+			validateOperatorClientFunc: func(ts *testing.T, operatorClient v1helpers.OperatorClient) {
+				expectedCondition := operatorv1.OperatorCondition{
+					Type:    "EncryptionStateControllerDegraded",
+					Status:  "True",
+					Reason:  "Error",
+					Message: "invalid encryption config kms/encryption-config-1: yaml: control characters are not allowed",
+				}
+				encryptiontesting.ValidateOperatorClientConditions(ts, operatorClient, []operatorv1.OperatorCondition{expectedCondition})
+			},
+		},
+	}
+
+	for _, scenario := range scenarios {
+		t.Run(scenario.name, func(t *testing.T) {
+			// setup
+			fakeOperatorClient := v1helpers.NewFakeStaticPodOperatorClient(
+				&operatorv1.StaticPodOperatorSpec{
+					OperatorSpec: operatorv1.OperatorSpec{
+						ManagementState: operatorv1.Managed,
+					},
+				},
+				&operatorv1.StaticPodOperatorStatus{
+					OperatorStatus: operatorv1.OperatorStatus{
+						// we need to set up proper conditions before the test starts because the
+						// controller calls UpdateStatus, which calls UpdateOperatorStatus - a method
+						// the fake client does not support (it panics)
+						Conditions: []operatorv1.OperatorCondition{
+							{
+								Type:   "EncryptionStateControllerDegraded",
+								Status: "False",
},
+						},
+					},
+					NodeStatuses: []operatorv1.NodeStatus{
+						{NodeName: "node-1"},
+					},
+				},
+				nil,
+				nil,
+			)
+
+			fakeKubeClient := fake.NewSimpleClientset(scenario.initialResources...)
+			realEventRecorder := events.NewRecorder(fakeKubeClient.CoreV1().Events(scenario.targetNamespace), "test-encryptionKeyController", &corev1.ObjectReference{})
+			eventRecorder := eventstesting.NewEventRecorder(t, realEventRecorder)
+			// we pass "openshift-config-managed" and $targetNamespace because the controller creates an informer for secrets in those namespaces.
+			// note that the informer factory is not used in the test - it is only needed to create the controller
+			kubeInformers := v1helpers.NewKubeInformersForNamespaces(fakeKubeClient, "openshift-config-managed", scenario.targetNamespace)
+			fakeSecretClient := fakeKubeClient.CoreV1()
+			fakePodClient := fakeKubeClient.CoreV1()
+
+			deployer, err := encryptiondeployer.NewRevisionLabelPodDeployer("revision", scenario.targetNamespace, kubeInformers, nil, fakePodClient, fakeSecretClient, encryptiondeployer.StaticPodNodeProvider{OperatorClient: fakeOperatorClient})
+			if err != nil {
+				t.Fatal(err)
+			}
+			target := NewStateController(
+				scenario.targetNamespace,
+				deployer,
+				fakeOperatorClient,
+				kubeInformers,
+				fakeSecretClient,
+				scenario.encryptionSecretSelector,
+				eventRecorder,
+				scenario.targetGRs,
+			)
+
+			// act
+			err = target.sync()
+
+			// validate
+			if err == nil && scenario.expectedError != nil {
+				t.Fatal("expected to get an error from sync() method")
+			}
+			if err != nil && scenario.expectedError == nil {
+				t.Fatal(err)
+			}
+			if err != nil && scenario.expectedError != nil && err.Error() != scenario.expectedError.Error() {
+				t.Fatalf("unexpected error returned = %v, expected = %v", err, scenario.expectedError)
+			}
+			if err := encryptiontesting.ValidateActionsVerbs(fakeKubeClient.Actions(), scenario.expectedActions); err != nil {
+				t.Fatalf("incorrect action(s) detected: %v", err)
+			}
+			if scenario.validateFunc != nil {
+				scenario.validateFunc(t, fakeKubeClient.Actions(), fmt.Sprintf("%s-%s", encryptionconfig.EncryptionConfSecretName, scenario.targetNamespace), scenario.expectedEncryptionCfg)
+			}
+			if scenario.validateOperatorClientFunc != nil {
+				scenario.validateOperatorClientFunc(t, fakeOperatorClient)
+			}
+		})
+	}
+}
+
+func validateSecretWithEncryptionConfig(actualSecret *corev1.Secret, expectedEncryptionCfg *apiserverconfigv1.EncryptionConfiguration, expectedSecretName string) error {
+	actualEncryptionCfg, err := encryptionconfig.FromSecret(actualSecret)
+	if err != nil {
+		return fmt.Errorf("failed to verify the encryption config: %v", err)
+	}
+
+	if !equality.Semantic.DeepEqual(expectedEncryptionCfg, actualEncryptionCfg) {
+		return fmt.Errorf("%s", diff.ObjectDiff(expectedEncryptionCfg, actualEncryptionCfg))
+	}
+
+	// rewrite the payload and compare the rest
+	expectedSecret := &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: corev1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      expectedSecretName,
+			Namespace: "openshift-config-managed",
+			Annotations: map[string]string{
+				state.KubernetesDescriptionKey: state.KubernetesDescriptionScaryValue,
+			},
+			Finalizers: []string{"encryption.apiserver.operator.openshift.io/deletion-protection"},
+		},
+		Data: actualSecret.Data,
+	}
+
+	// these fields are filled in by the server
+	if len(actualSecret.Kind) == 0 {
+		actualSecret.Kind = "Secret"
+	}
+	if len(actualSecret.APIVersion) == 0 {
+		actualSecret.APIVersion = corev1.SchemeGroupVersion.String()
+	}
+
+	if !equality.Semantic.DeepEqual(expectedSecret, actualSecret) {
+		return fmt.Errorf("%s", diff.ObjectDiff(expectedSecret, actualSecret))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/crypto/keys.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/crypto/keys.go
new file mode 100644
index 00000000000..2d660ce1d51
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/crypto/keys.go
@@ -0,0 +1,27 @@
+package crypto
+
+import (
+	"crypto/rand"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/state"
+)
+
+var (
+	ModeToNewKeyFunc = map[state.Mode]func() []byte{
+		state.AESCBC:    NewAES256Key,
+		state.SecretBox: NewAES256Key, // secretbox requires a 32 byte key so we can reuse the same function here
+		state.Identity:  NewIdentityKey,
+	}
+)
+
+func NewAES256Key() []byte {
+	b := make([]byte, 32) // AES-256 == 32 byte key
+	if _, err := rand.Read(b); err != nil {
+		panic(err) // rand should never fail
+	}
+	return b
+}
+
+func NewIdentityKey() []byte {
+	return make([]byte, 16) // the key is not used to perform encryption but must be a valid AES key
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/revisionedpod.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/revisionedpod.go
new file mode 100644
index 00000000000..ecdeb97a6b0
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/revisionedpod.go
@@ -0,0 +1,261 @@
+package deployer
+
+import (
+	"fmt"
+	"strconv"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/informers"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig"
+	"github.com/openshift/library-go/pkg/operator/encryption/statemachine"
+	"github.com/openshift/library-go/pkg/operator/resourcesynccontroller"
+	operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// MasterNodeProvider provides master nodes.
+type MasterNodeProvider interface {
+	// MasterNodeNames returns a list of nodes expected to run API server pods.
+	MasterNodeNames() ([]string, error)
+
+	// AddEventHandler registers handlers which are called whenever a resource
+	// changes that can influence the result of MasterNodeNames.
+	AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced
+}
+
+// RevisionLabelPodDeployer is a deployer abstraction meant for pods with
+// a label storing the deployed encryption config revision, like the pods created
+// by the staticpod controllers.
+type RevisionLabelPodDeployer struct {
+	podClient    corev1client.PodInterface
+	secretClient corev1client.SecretInterface
+
+	targetNamespaceInformers informers.SharedInformerFactory
+
+	nodeProvider MasterNodeProvider
+
+	revisionLabel string
+}
+
+var (
+	_ statemachine.Deployer = &RevisionLabelPodDeployer{}
+)
+
+// NewRevisionLabelPodDeployer creates a deployer abstraction meant for pods with
+// a label storing the deployed encryption config revision, like the pods created
+// by the staticpod controllers.
+//
+// It syncs the encryption-config-<targetNamespace> secret from the openshift-config-managed
+// namespace to the target namespace as encryption-config. From there it is
+// revisioned and deployed to the static pods. The last deployed encryption
+// config is read from encryption-config-<revision>.
+//
+// For testing, resourceSyncer might be nil.
+func NewRevisionLabelPodDeployer(
+	revisionLabel string,
+	targetNamespace string,
+	namespaceInformers operatorv1helpers.KubeInformersForNamespaces,
+	resourceSyncer resourcesynccontroller.ResourceSyncer,
+	podClient corev1client.PodsGetter,
+	secretClient corev1client.SecretsGetter,
+	nodeProvider MasterNodeProvider,
+) (*RevisionLabelPodDeployer, error) {
+	if resourceSyncer != nil {
+		if err := resourceSyncer.SyncSecret(
+			resourcesynccontroller.ResourceLocation{Namespace: targetNamespace, Name: encryptionconfig.EncryptionConfSecretName},
+			resourcesynccontroller.ResourceLocation{Namespace: "openshift-config-managed", Name: fmt.Sprintf("%s-%s", encryptionconfig.EncryptionConfSecretName, targetNamespace)},
+		); err != nil {
+			return nil, err
+		}
+	}
+
+	return &RevisionLabelPodDeployer{
+		podClient:                podClient.Pods(targetNamespace),
+		secretClient:             secretClient.Secrets(targetNamespace),
+		nodeProvider:             nodeProvider,
+		targetNamespaceInformers: namespaceInformers.InformersFor(targetNamespace),
+		revisionLabel:            revisionLabel,
+	}, nil
+}
+
+// DeployedEncryptionConfigSecret returns the deployed encryption config and whether all
+// instances of the operand have acknowledged it.
+func (d *RevisionLabelPodDeployer) DeployedEncryptionConfigSecret() (secret *corev1.Secret, converged bool, err error) {
+	nodes, err := d.nodeProvider.MasterNodeNames()
+	if err != nil {
+		return nil, false, err
+	}
+	if len(nodes) == 0 {
+		return nil, false, nil
+	}
+
+	// do a live list so we never get confused about what revision we are on
+	apiServerPods, err := d.podClient.List(metav1.ListOptions{LabelSelector: "apiserver=true"})
+	if err != nil {
+		return nil, false, err
+	}
+
+	revision, err := getAPIServerRevisionOfAllInstances(d.revisionLabel, nodes, apiServerPods.Items)
+	if err != nil {
+		return nil, false, fmt.Errorf("failed to get converged static pod revision: %v", err)
+	}
+	if len(revision) == 0 {
+		return nil, false, nil
+	}
+
+	s, err := d.secretClient.Get(encryptionconfig.EncryptionConfSecretName+"-"+revision, metav1.GetOptions{})
+	if err != nil {
+		// if encryption is not enabled at this revision or the secret was deleted, we should not error
+		if errors.IsNotFound(err) {
+			return nil, true, nil
+		}
+		return nil, false, err
+	}
+	return s, true, nil
+}
+
+// AddEventHandler registers an event handler that is called whenever a backing resource
+// changes that might influence the result of DeployedEncryptionConfigSecret.
+func (d *RevisionLabelPodDeployer) AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced {
+	targetPodInformer := d.targetNamespaceInformers.Core().V1().Pods().Informer()
+	targetPodInformer.AddEventHandler(handler)
+
+	targetSecretsInformer := d.targetNamespaceInformers.Core().V1().Secrets().Informer()
+	targetSecretsInformer.AddEventHandler(handler)
+
+	return append([]cache.InformerSynced{
+		targetPodInformer.HasSynced,
+		targetSecretsInformer.HasSynced,
+	}, d.nodeProvider.AddEventHandler(handler)...)
+}
+
+// getAPIServerRevisionOfAllInstances attempts to find the current revision that
+// the API servers are running at. If all API servers have not converged onto
+// a single revision, it returns the empty string and possibly an error.
+// Converged can be defined as:
+// 1. All running pods are ready and at the same revision
+// 2. All master nodes have a running pod
+// 3. There are no pending or unknown pods
+// 4.
All succeeded and failed pods have revisions that are before the running pods +// Once a converged revision has been determined, it can be used to determine +// what encryption config state has been successfully observed by the API servers. +// It assumes that podClient is doing live lookups against the cluster state. +func getAPIServerRevisionOfAllInstances(revisionLabel string, nodes []string, apiServerPods []corev1.Pod) (string, error) { + good, bad, progressing, err := categorizePods(apiServerPods) + if err != nil { + return "", err + } + if progressing { + return "", nil + } + + goodRevisions := revisions(revisionLabel, good) + goodNodes := nodeNames(good) + failingRevisions := revisions(revisionLabel, bad) + + if len(goodRevisions) != 1 { + return "", nil // api servers have not converged onto a single revision + } + revision, _ := goodRevisions.PopAny() + if len(revision) == 0 { + revision = "0" + } + + if failingRevisions.Has(revision) { + return "", fmt.Errorf("api server revision %s has both running and failed pods", revision) + } + + // make sure all expected nodes are there + missingNodes := []string{} + for _, n := range nodes { + if !goodNodes.Has(n) { + missingNodes = append(missingNodes, n) + } + } + if len(missingNodes) > 0 { + return "", nil // we are still progressing + } + + if len(revision) == 0 { + return "", nil + } + revisionNum, err := strconv.Atoi(revision) + if err != nil { + return "", fmt.Errorf("api server has invalid revision: %v", err) + } + + for _, failedRevision := range failingRevisions.List() { // iterate in defined order + if len(failedRevision) == 0 { + // these will never be bigger than revisionNum + continue + } + failedRevisionNum, err := strconv.Atoi(failedRevision) + if err != nil { + return "", fmt.Errorf("api server has invalid failed revision: %v", err) + } + if failedRevisionNum > revisionNum { // TODO can this dead lock? 
+ return "", fmt.Errorf("api server has failed revision %v which is newer than running revision %v", failedRevisionNum, revisionNum) + } + } + + return revision, nil +} + +func revisions(revisionLabel string, pods []*corev1.Pod) sets.String { + ret := sets.NewString() + for _, p := range pods { + ret.Insert(p.Labels[revisionLabel]) + } + return ret +} + +func nodeNames(pods []*corev1.Pod) sets.String { + ret := sets.NewString() + for _, p := range pods { + ret.Insert(p.Spec.NodeName) + } + return ret +} + +func categorizePods(pods []corev1.Pod) (good []*corev1.Pod, bad []*corev1.Pod, progressing bool, err error) { + if len(pods) == 0 { + return nil, nil, true, err + } + for _, apiServerPod := range pods { + switch phase := apiServerPod.Status.Phase; phase { + case corev1.PodRunning: + if !podReady(apiServerPod) { + return nil, nil, true, nil // pods are not fully ready + } + goodPod := apiServerPod // shallow copy because apiServerPod is bound loop var + good = append(good, &goodPod) + case corev1.PodPending: + return nil, nil, true, nil // pods are not fully ready + case corev1.PodUnknown: + return nil, nil, false, fmt.Errorf("api server pod %s in unknown phase", apiServerPod.Name) + case corev1.PodSucceeded, corev1.PodFailed: + // handle failed pods carefully to make sure things are healthy + // since the API server should never exit, a succeeded pod is considered as failed + badPod := apiServerPod // shallow copy because apiServerPod is bound loop var + bad = append(bad, &badPod) + default: + // error in case new unexpected phases get added + return nil, nil, false, fmt.Errorf("api server pod %s has unexpected phase %v", apiServerPod.Name, phase) + } + } + return good, bad, false, nil +} + +func podReady(pod corev1.Pod) bool { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/revisionedpod_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/revisionedpod_test.go new file mode 100644 index 00000000000..1c5ac433304 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/revisionedpod_test.go @@ -0,0 +1,169 @@ +package deployer + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestCategorizePods(t *testing.T) { + tests := []struct { + name string + pods []corev1.Pod + nodes []string + wantGood []*corev1.Pod + wantBad []*corev1.Pod + wantCategorizeProgressing bool + wantCategorizeErr bool + + wantCommonRevision string + wantGetAPIServerRevisionOfAllInstancesError bool + }{ + {"no pod", nil, nil, nil, nil, true, false, "", false}, + { + "good pods, same revision", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node2"), + }, nil, false, false, "3", false, + }, + { + "good pods, different revision", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionTrue, "5", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + newPod(corev1.PodRunning, 
corev1.ConditionTrue, "5", "node2"), + }, nil, false, false, "", false, + }, + { + "ready and unready pods", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionFalse, "3", "node2"), + }, []string{"node1", "node2"}, nil, nil, true, false, "3", false, + }, + { + "good pods and pending pods", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodPending, corev1.ConditionFalse, "3", "node2"), + }, []string{"node1", "node2"}, nil, nil, true, false, "3", false, + }, + { + "good pods and failed pods", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodFailed, corev1.ConditionFalse, "3", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + }, []*corev1.Pod{ + newPod(corev1.PodFailed, corev1.ConditionFalse, "3", "node2"), + }, false, false, "3", false, + }, + { + "good pods and succeeded pods", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodSucceeded, corev1.ConditionFalse, "3", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + }, []*corev1.Pod{ + newPod(corev1.PodSucceeded, corev1.ConditionFalse, "3", "node2"), + }, false, false, "3", false, + }, + { + "good pods and unknown phase pods", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "3", "node1"), + *newPod(corev1.PodUnknown, corev1.ConditionFalse, "3", "node2"), + }, []string{"node1", "node2"}, nil, nil, false, true, "", false, + }, + { + "all empty revision", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node2"), + }, nil, false, false, "0", false, + }, + { + "one empty revision", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionTrue, "1", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + newPod(corev1.PodRunning, corev1.ConditionTrue, "1", "node2"), + }, nil, false, false, "1", false, + }, + { + "one empty revision, one zero", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionTrue, "0", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + newPod(corev1.PodRunning, corev1.ConditionTrue, "0", "node2"), + }, nil, false, false, "0", false, + }, + { + "one invalid revision", []corev1.Pod{ + *newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + *newPod(corev1.PodRunning, corev1.ConditionTrue, "abc", "node2"), + }, []string{"node1", "node2"}, []*corev1.Pod{ + newPod(corev1.PodRunning, corev1.ConditionTrue, "", "node1"), + newPod(corev1.PodRunning, corev1.ConditionTrue, "abc", "node2"), + }, nil, false, false, "", true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotGood, gotBad, gotProgressing, err := categorizePods(tt.pods) + if (err != nil) != tt.wantCategorizeErr { + t.Errorf("categorizePods() error = %v, wantErr %v", err, tt.wantCategorizeErr) + return + } + if 
!reflect.DeepEqual(gotGood, tt.wantGood) {
+				t.Errorf("categorizePods() gotGood = %v, want %v", gotGood, tt.wantGood)
+			}
+			if !reflect.DeepEqual(gotBad, tt.wantBad) {
+				t.Errorf("categorizePods() gotBad = %v, want %v", gotBad, tt.wantBad)
+			}
+			if gotProgressing != tt.wantCategorizeProgressing {
+				t.Errorf("categorizePods() gotProgressing = %v, want %v", gotProgressing, tt.wantCategorizeProgressing)
+			}
+
+			// check the converged revision only when categorization succeeded
+			if err == nil {
+				rev, err := getAPIServerRevisionOfAllInstances("revision", tt.nodes, tt.pods)
+				if (err != nil) != tt.wantGetAPIServerRevisionOfAllInstancesError {
+					t.Errorf("getAPIServerRevisionOfAllInstances() error = %v, wantErr %v", err, tt.wantGetAPIServerRevisionOfAllInstancesError)
+					return
+				}
+				if rev != tt.wantCommonRevision {
+					t.Errorf("getAPIServerRevisionOfAllInstances() rev = %q, want %q", rev, tt.wantCommonRevision)
+				}
+			}
+		})
+	}
+}
+
+func newPod(phase corev1.PodPhase, ready corev1.ConditionStatus, revision, nodeName string) *corev1.Pod {
+	pod := corev1.Pod{
+		TypeMeta: v1.TypeMeta{Kind: "Pod"},
+		ObjectMeta: v1.ObjectMeta{
+			Labels: map[string]string{
+				"revision": revision,
+			}},
+		Spec: corev1.PodSpec{
+			NodeName: nodeName,
+		},
+		Status: corev1.PodStatus{
+			Phase: phase,
+			Conditions: []corev1.PodCondition{{
+				Type:   corev1.PodReady,
+				Status: ready,
+			}},
+		},
+	}
+
+	return &pod
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/staticpod.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/staticpod.go
new file mode 100644
index 00000000000..68cce6b936f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/deployer/staticpod.go
@@ -0,0 +1,33 @@
+package deployer
+
+import (
+	"k8s.io/client-go/tools/cache"
+
+	operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// StaticPodNodeProvider returns the node list from the node status in the static pod operator status.
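+//
+// A minimal wiring sketch (it mirrors how the state controller tests in this patch
+// construct a deployer; operatorClient, kubeInformers, podClient and secretClient
+// are assumed to exist in the caller):
+//
+//	deployer, err := NewRevisionLabelPodDeployer(
+//		"revision", targetNamespace, kubeInformers, nil, // resourceSyncer may be nil in tests
+//		podClient, secretClient,
+//		StaticPodNodeProvider{OperatorClient: operatorClient})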
+type StaticPodNodeProvider struct {
+	OperatorClient operatorv1helpers.StaticPodOperatorClient
+}
+
+var (
+	_ MasterNodeProvider = &StaticPodNodeProvider{}
+)
+
+func (p StaticPodNodeProvider) MasterNodeNames() ([]string, error) {
+	_, status, _, err := p.OperatorClient.GetStaticPodOperatorState()
+	if err != nil {
+		return nil, err
+	}
+	ret := make([]string, 0, len(status.NodeStatuses))
+	for _, n := range status.NodeStatuses {
+		ret = append(ret, n.NodeName)
+	}
+	return ret, nil
+}
+
+func (p StaticPodNodeProvider) AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced {
+	p.OperatorClient.Informer().AddEventHandler(handler)
+	return []cache.InformerSynced{p.OperatorClient.Informer().HasSynced}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/config.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/config.go
new file mode 100644
index 00000000000..1eaa7e74011
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/config.go
@@ -0,0 +1,201 @@
+package encryptionconfig
+
+import (
+	"encoding/base64"
+	"sort"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
+	"k8s.io/klog"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/crypto"
+	"github.com/openshift/library-go/pkg/operator/encryption/secrets"
+	"github.com/openshift/library-go/pkg/operator/encryption/state"
+)
+
+var (
+	emptyStaticIdentityKey = base64.StdEncoding.EncodeToString(crypto.NewIdentityKey())
+)
+
+// FromEncryptionState converts state to config.
+func FromEncryptionState(encryptionState map[schema.GroupResource]state.GroupResourceState) *apiserverconfigv1.EncryptionConfiguration {
+	resourceConfigs := make([]apiserverconfigv1.ResourceConfiguration, 0, len(encryptionState))
+
+	for gr, grKeys := range encryptionState {
+		resourceConfigs = append(resourceConfigs, apiserverconfigv1.ResourceConfiguration{
+			Resources: []string{gr.String()}, // we are forced to lose data here because this API is broken
+			Providers: stateToProviders(grKeys),
+		})
+	}
+
+	// make sure our output is stable
+	sort.Slice(resourceConfigs, func(i, j int) bool {
+		return resourceConfigs[i].Resources[0] < resourceConfigs[j].Resources[0] // each resource has its own keys
+	})
+
+	return &apiserverconfigv1.EncryptionConfiguration{Resources: resourceConfigs}
+}
+
+// ToEncryptionState converts config to state.
+// Read keys contain a potential write key. Read keys are sorted, recent first.
+//
+// It assumes:
+// - the first provider provides the write key
+// - the structure of the encryptionConfig matches the output generated by FromEncryptionState:
+//   - one resource per provider
+//   - one key per provider
+//   - each resource has a distinct configuration with zero or more key-based providers and the identity provider.
+//   - the last providers might be of type aesgcm. They carry the names of identity keys, recent first.
+//     We never use aesgcm as a real key because it is unsafe.
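+//
+// For illustration (a sketch of the assumed shape, not a complete configuration):
+// a resource entry such as
+//
+//	resources: ["secrets"]
+//	providers:
+//	- aescbc:   {keys: [{name: "34", secret: <base64 key>}]}
+//	- aescbc:   {keys: [{name: "33", secret: <base64 key>}]}
+//	- identity: {}
+//
+// yields a GroupResourceState for "secrets" whose write key is key "34" and whose
+// read keys are "34" and "33", sorted recent first.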
+func ToEncryptionState(encryptionConfig *apiserverconfigv1.EncryptionConfiguration, keySecrets []*corev1.Secret) (map[schema.GroupResource]state.GroupResourceState, []state.KeyState) { + backedKeys := make([]state.KeyState, 0, len(keySecrets)) + for _, s := range keySecrets { + km, err := secrets.ToKeyState(s) + if err != nil { + klog.Warningf("skipping invalid secret: %v", err) + continue + } + km.Backed = true + backedKeys = append(backedKeys, km) + } + backedKeys = state.SortRecentFirst(backedKeys) + + if encryptionConfig == nil { + return nil, backedKeys + } + + out := map[schema.GroupResource]state.GroupResourceState{} + for _, resourceConfig := range encryptionConfig.Resources { + // resources should be a single group resource + if len(resourceConfig.Resources) != 1 { + klog.Warningf("skipping invalid encryption config for resource %s", resourceConfig.Resources) + continue // should never happen + } + + grState := state.GroupResourceState{} + + for i, provider := range resourceConfig.Providers { + var ks state.KeyState + + switch { + case provider.AESCBC != nil && len(provider.AESCBC.Keys) == 1: + ks = state.KeyState{ + Key: provider.AESCBC.Keys[0], + Mode: state.AESCBC, + } + + case provider.Secretbox != nil && len(provider.Secretbox.Keys) == 1: + ks = state.KeyState{ + Key: provider.Secretbox.Keys[0], + Mode: state.SecretBox, + } + + case provider.Identity != nil: + // skip fake provider. If this is write-key, wait for first aesgcm provider providing the write key. + continue + + case provider.AESGCM != nil && len(provider.AESGCM.Keys) == 1 && provider.AESGCM.Keys[0].Secret == emptyStaticIdentityKey: + ks = state.KeyState{ + Key: provider.AESGCM.Keys[0], + Mode: state.Identity, + } + + default: + klog.Infof("skipping invalid provider index %d for resource %s", i, resourceConfig.Resources[0]) + continue // should never happen + } + + // enrich KeyState with values from secrets + for _, k := range backedKeys { + if state.EqualKeyAndEqualID(&ks, &k) { + ks = k + break + } + } + + if i == 0 || (ks.Mode == state.Identity && !grState.HasWriteKey()) { + grState.WriteKey = ks + } + + grState.ReadKeys = append(grState.ReadKeys, ks) // also for write key as they are also read keys + } + + // sort read-keys, recent first + grState.ReadKeys = state.SortRecentFirst(grState.ReadKeys) + + out[schema.ParseGroupResource(resourceConfig.Resources[0])] = grState + } + + return out, backedKeys +} + +// stateToProviders maps the write and read secrets to the equivalent read and write keys. +// it primarily handles the conversion of KeyState to the appropriate provider config. +// the identity mode is transformed into a custom aesgcm provider that simply exists to +// curry the associated null key secret through the encryption state machine. +func stateToProviders(desired state.GroupResourceState) []apiserverconfigv1.ProviderConfiguration { + allKeys := desired.ReadKeys + + providers := make([]apiserverconfigv1.ProviderConfiguration, 0, len(allKeys)+1) // one extra for identity + + // Write key comes first. Filter it out in the tail of read keys. + if desired.HasWriteKey() { + allKeys = append([]state.KeyState{desired.WriteKey}, allKeys...) + for i := 1; i < len(allKeys); i++ { + if state.EqualKeyAndEqualID(&allKeys[i], &desired.WriteKey) { + allKeys = append(allKeys[:i], allKeys[i+1:]...) 
+				break
+			}
+		}
+	} else {
+		// no write key => identity write key
+		providers = append(providers, apiserverconfigv1.ProviderConfiguration{
+			Identity: &apiserverconfigv1.IdentityConfiguration{},
+		})
+	}
+
+	aesgcmProviders := []apiserverconfigv1.ProviderConfiguration{}
+	for i, key := range allKeys {
+		switch key.Mode {
+		case state.AESCBC:
+			providers = append(providers, apiserverconfigv1.ProviderConfiguration{
+				AESCBC: &apiserverconfigv1.AESConfiguration{
+					Keys: []apiserverconfigv1.Key{key.Key},
+				},
+			})
+		case state.SecretBox:
+			providers = append(providers, apiserverconfigv1.ProviderConfiguration{
+				Secretbox: &apiserverconfigv1.SecretboxConfiguration{
+					Keys: []apiserverconfigv1.Key{key.Key},
+				},
+			})
+		case state.Identity:
+			if i == 0 {
+				providers = append(providers, apiserverconfigv1.ProviderConfiguration{
+					Identity: &apiserverconfigv1.IdentityConfiguration{},
+				})
+			}
+			aesgcmProviders = append(aesgcmProviders, apiserverconfigv1.ProviderConfiguration{
+				AESGCM: &apiserverconfigv1.AESConfiguration{
+					Keys: []apiserverconfigv1.Key{key.Key},
+				},
+			})
+		default:
+			// this should never happen because our input should always be valid
+			klog.Infof("skipping key %s as it has invalid mode %s", key.Key.Name, key.Mode)
+		}
+	}
+
+	// add fallback identity provider.
+	if providers[0].Identity == nil {
+		providers = append(providers, apiserverconfigv1.ProviderConfiguration{
+			Identity: &apiserverconfigv1.IdentityConfiguration{},
+		})
+	}
+
+	// add fake aesgcm providers carrying the identity key names
+	providers = append(providers, aesgcmProviders...)
+
+	return providers
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/config_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/config_test.go
new file mode 100644
index 00000000000..d18933a6e94
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/config_test.go
@@ -0,0 +1,553 @@
+package encryptionconfig
+
+import (
+	"encoding/base64"
+	"fmt"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/secrets"
+	"github.com/openshift/library-go/pkg/operator/encryption/state"
+	encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing"
+)
+
+func TestToEncryptionState(t *testing.T) {
+	scenarios := []struct {
+		name   string
+		input  *apiserverconfigv1.EncryptionConfiguration
+		output map[schema.GroupResource]state.GroupResourceState
+	}{
+		// scenario 1
+		{
+			name: "single write key",
+			input: func() *apiserverconfigv1.EncryptionConfiguration {
+				keysRes := encryptiontesting.EncryptionKeysResourceTuple{
+					Resource: "secrets",
+					Keys: []apiserverconfigv1.Key{
+						{
+							Name:   "34",
+							Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=",
+						},
+					},
+				}
+				ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes})
+				return ec
+			}(),
+			output: map[schema.GroupResource]state.GroupResourceState{
+				{Group: "", Resource: "secrets"}: {
+					WriteKey: state.KeyState{
+						Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc",
+					},
+					ReadKeys: []state.KeyState{{
+						Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc",
+					}},
+				},
+			},
+		},
+
+		// scenario 2
+		{
+			name:
"multiple keys", + input: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + { + Name: "33", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + {Key: apiserverconfigv1.Key{Name: "33", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 3 + { + name: "single write key multiple resources", + input: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := []encryptiontesting.EncryptionKeysResourceTuple{ + { + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + }, + + { + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey(keysRes) + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + {Group: "", Resource: "configmaps"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 4 + { + name: "multiple keys and multiple resources", + input: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := []encryptiontesting.EncryptionKeysResourceTuple{ + { + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + { + Name: "33", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + }, + + { + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + { + Name: "33", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + }, + }, + } + ec := encryptiontesting.CreateEncryptionCfgWithWriteKey(keysRes) + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + {Key: apiserverconfigv1.Key{Name: 
"33", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + {Group: "", Resource: "configmaps"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + {Key: apiserverconfigv1.Key{Name: "33", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 5 + { + name: "single read key", + input: func() *apiserverconfigv1.EncryptionConfiguration { + ec := encryptiontesting.CreateEncryptionCfgNoWriteKey("34", "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", "secrets") + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 6 + { + name: "single read key multiple resources", + input: func() *apiserverconfigv1.EncryptionConfiguration { + ec := encryptiontesting.CreateEncryptionCfgNoWriteKey("34", "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", "secrets", "configmaps") + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + {Group: "", Resource: "configmaps"}: { + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 7 + { + name: "turn off encryption for single resource", + input: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := encryptiontesting.EncryptionKeysResourceTuple{ + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + { + Name: "35", + Secret: newFakeIdentityEncodedKeyForTest(), + }, + }, + Modes: []string{"aescbc", "aesgcm"}, + } + ec := encryptiontesting.CreateEncryptionCfgNoWriteKeyMultipleReadKeys([]encryptiontesting.EncryptionKeysResourceTuple{keysRes}) + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "35", Secret: newFakeIdentityEncodedKeyForTest()}, Mode: "identity", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "35", Secret: newFakeIdentityEncodedKeyForTest()}, Mode: "identity"}, + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 8 + { + name: "turn off encryption for multiple resources", + input: func() *apiserverconfigv1.EncryptionConfiguration { + keysRes := []encryptiontesting.EncryptionKeysResourceTuple{ + { + Resource: "secrets", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + + // stateToProviders puts "fakeIdentityProvider" as last + { + Name: "35", + Secret: newFakeIdentityEncodedKeyForTest(), + }, + }, + Modes: []string{"aescbc", "aesgcm"}, + }, + + { + Resource: "configmaps", + Keys: []apiserverconfigv1.Key{ + { + Name: "34", + Secret: 
"MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc=", + }, + + // stateToProviders puts "fakeIdentityProvider" as last + { + Name: "35", + Secret: newFakeIdentityEncodedKeyForTest(), + }, + }, + Modes: []string{"aescbc", "aesgcm"}, + }, + } + ec := encryptiontesting.CreateEncryptionCfgNoWriteKeyMultipleReadKeys(keysRes) + return ec + }(), + output: map[schema.GroupResource]state.GroupResourceState{ + {Group: "", Resource: "secrets"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "35", Secret: newFakeIdentityEncodedKeyForTest()}, Mode: "identity", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "35", Secret: newFakeIdentityEncodedKeyForTest()}, Mode: "identity"}, + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + + {Group: "", Resource: "configmaps"}: { + WriteKey: state.KeyState{ + Key: apiserverconfigv1.Key{Name: "35", Secret: newFakeIdentityEncodedKeyForTest()}, Mode: "identity", + }, + ReadKeys: []state.KeyState{ + {Key: apiserverconfigv1.Key{Name: "35", Secret: newFakeIdentityEncodedKeyForTest()}, Mode: "identity"}, + {Key: apiserverconfigv1.Key{Name: "34", Secret: "MTcxNTgyYTBmY2Q2YzVmZGI2NWNiZjVhM2U5MjQ5ZDc="}, Mode: "aescbc"}, + }, + }, + }, + }, + + // scenario 9 + // TODO: encryption on after being off + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + actualOutput, _ := ToEncryptionState(scenario.input, nil) + + if len(actualOutput) != len(scenario.output) { + t.Fatalf("expected to get %d GR, got %d", len(scenario.output), len(actualOutput)) + } + for actualGR, actualKeys := range actualOutput { + if _, ok := scenario.output[actualGR]; !ok { + t.Fatalf("unexpected GR %v found", actualGR) + } + expectedKeys, _ := scenario.output[actualGR] + if !cmp.Equal(expectedKeys.WriteKey, actualKeys.WriteKey, cmp.AllowUnexported(state.GroupResourceState{}.WriteKey)) { + t.Fatal(fmt.Errorf("%s", cmp.Diff(expectedKeys.WriteKey, actualKeys.WriteKey, cmp.AllowUnexported(state.GroupResourceState{}.WriteKey)))) + } + if !cmp.Equal(expectedKeys.ReadKeys, actualKeys.ReadKeys, cmp.AllowUnexported(state.GroupResourceState{}.WriteKey)) { + t.Fatal(fmt.Errorf("%s", cmp.Diff(expectedKeys.ReadKeys, actualKeys.ReadKeys, cmp.AllowUnexported(state.GroupResourceState{}.WriteKey)))) + } + } + }) + } +} + +func TestFromEncryptionState(t *testing.T) { + scenarios := []struct { + name string + grs []schema.GroupResource + targetNs string + writeKeyIn *corev1.Secret + readKeysIn []*corev1.Secret + output []apiserverconfigv1.ResourceConfiguration + makeOutput func(writeKey *corev1.Secret, readKeys []*corev1.Secret) []apiserverconfigv1.ResourceConfiguration + }{ + // scenario 1 + { + name: "turn off encryption for single resource", + grs: []schema.GroupResource{{Group: "", Resource: "secrets"}}, + targetNs: "kms", + writeKeyIn: encryptiontesting.CreateEncryptionKeySecretWithRawKeyWithMode("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 3, newFakeIdentityKeyForTest(), "identity"), + readKeysIn: []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 2, []byte("61def964fb967f5d7c44a2af8dab6865")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 1, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + makeOutput: func(writeKey *corev1.Secret, readKeys []*corev1.Secret) 
[]apiserverconfigv1.ResourceConfiguration { + rs := apiserverconfigv1.ResourceConfiguration{} + rs.Resources = []string{"secrets"} + rs.Providers = []apiserverconfigv1.ProviderConfiguration{ + {Identity: &apiserverconfigv1.IdentityConfiguration{}}, + {AESCBC: keyToAESConfiguration(readKeys[0])}, + {AESCBC: keyToAESConfiguration(readKeys[1])}, + {AESGCM: keyToAESConfiguration(writeKey)}, + } + return []apiserverconfigv1.ResourceConfiguration{rs} + }, + }, + + // scenario 2 + { + name: "order of the keys is preserved, the write key comes first, then the read keys finally the identity comes last", + grs: []schema.GroupResource{{Group: "", Resource: "secrets"}}, + targetNs: "kms", + writeKeyIn: encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 3, []byte("16f87d5793a3cb726fb9be7ef8211821")), + readKeysIn: []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 2, []byte("558bf68d6d8ab5dd819eec02901766c1")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 1, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + makeOutput: func(writeKey *corev1.Secret, readKeys []*corev1.Secret) []apiserverconfigv1.ResourceConfiguration { + rs := apiserverconfigv1.ResourceConfiguration{} + rs.Resources = []string{"secrets"} + rs.Providers = []apiserverconfigv1.ProviderConfiguration{ + {AESCBC: keyToAESConfiguration(writeKey)}, + {AESCBC: keyToAESConfiguration(readKeys[0])}, + {AESCBC: keyToAESConfiguration(readKeys[1])}, + {Identity: &apiserverconfigv1.IdentityConfiguration{}}, + } + return []apiserverconfigv1.ResourceConfiguration{rs} + }, + }, + + // scenario 3 + { + name: "the identity comes first up when there are no keys", + grs: []schema.GroupResource{{Group: "", Resource: "secrets"}}, + targetNs: "kms", + makeOutput: func(writeKey *corev1.Secret, readKeys []*corev1.Secret) []apiserverconfigv1.ResourceConfiguration { + rs := apiserverconfigv1.ResourceConfiguration{} + rs.Resources = []string{"secrets"} + rs.Providers = []apiserverconfigv1.ProviderConfiguration{{Identity: &apiserverconfigv1.IdentityConfiguration{}}} + return []apiserverconfigv1.ResourceConfiguration{rs} + }, + }, + + // scenario 4 + { + name: "order of the keys is preserved, the write key comes first, then the read keys finally the identity comes last - multiple resources", + grs: []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, + targetNs: "kms", + writeKeyIn: encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, 3, []byte("16f87d5793a3cb726fb9be7ef8211821")), + readKeysIn: []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, 2, []byte("558bf68d6d8ab5dd819eec02901766c1")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, 1, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + makeOutput: func(writeKey *corev1.Secret, readKeys []*corev1.Secret) []apiserverconfigv1.ResourceConfiguration { + rc := apiserverconfigv1.ResourceConfiguration{} + rc.Resources = []string{"configmaps"} + rc.Providers = 
[]apiserverconfigv1.ProviderConfiguration{ + {AESCBC: keyToAESConfiguration(writeKey)}, + {AESCBC: keyToAESConfiguration(readKeys[0])}, + {AESCBC: keyToAESConfiguration(readKeys[1])}, + {Identity: &apiserverconfigv1.IdentityConfiguration{}}, + } + + rs := apiserverconfigv1.ResourceConfiguration{} + rs.Resources = []string{"secrets"} + rs.Providers = []apiserverconfigv1.ProviderConfiguration{ + {AESCBC: keyToAESConfiguration(writeKey)}, + {AESCBC: keyToAESConfiguration(readKeys[0])}, + {AESCBC: keyToAESConfiguration(readKeys[1])}, + {Identity: &apiserverconfigv1.IdentityConfiguration{}}, + } + + return []apiserverconfigv1.ResourceConfiguration{rc, rs} + }, + }, + + // scenario 5 + { + name: "turn off encryption for multiple resources", + grs: []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, + targetNs: "kms", + writeKeyIn: encryptiontesting.CreateEncryptionKeySecretWithRawKeyWithMode("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, 3, newFakeIdentityKeyForTest(), "identity"), + readKeysIn: []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}, {Group: "", Resource: "configmaps"}}, 2, []byte("61def964fb967f5d7c44a2af8dab6865")), + encryptiontesting.CreateExpiredMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "secrets"}}, 1, []byte("61def964fb967f5d7c44a2af8dab6865")), + }, + makeOutput: func(writeKey *corev1.Secret, readKeys []*corev1.Secret) []apiserverconfigv1.ResourceConfiguration { + rc := apiserverconfigv1.ResourceConfiguration{} + rc.Resources = []string{"configmaps"} + rc.Providers = []apiserverconfigv1.ProviderConfiguration{ + {Identity: &apiserverconfigv1.IdentityConfiguration{}}, + {AESCBC: keyToAESConfiguration(readKeys[0])}, + {AESCBC: keyToAESConfiguration(readKeys[1])}, + {AESGCM: keyToAESConfiguration(writeKey)}, + } + + rs := apiserverconfigv1.ResourceConfiguration{} + rs.Resources = []string{"secrets"} + rs.Providers = []apiserverconfigv1.ProviderConfiguration{ + {Identity: &apiserverconfigv1.IdentityConfiguration{}}, + {AESCBC: keyToAESConfiguration(readKeys[0])}, + {AESCBC: keyToAESConfiguration(readKeys[1])}, + {AESGCM: keyToAESConfiguration(writeKey)}, + } + return []apiserverconfigv1.ResourceConfiguration{rc, rs} + }, + }, + + // scenario 6 + // TODO: encryption on after being off + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + + readKeyStatesIn := make([]state.KeyState, 0, len(scenario.readKeysIn)) + for _, s := range scenario.readKeysIn { + ks, err := secrets.ToKeyState(s) + if err != nil { + t.Fatal(err) + } + readKeyStatesIn = append(readKeyStatesIn, ks) + } + + var writeKeyStateIn state.KeyState + if scenario.writeKeyIn != nil { + var err error + writeKeyStateIn, err = secrets.ToKeyState(scenario.writeKeyIn) + if err != nil { + t.Fatal(err) + } + } + + grState := map[schema.GroupResource]state.GroupResourceState{} + for _, gr := range scenario.grs { + ks := state.GroupResourceState{ + ReadKeys: readKeyStatesIn, + WriteKey: writeKeyStateIn, + } + grState[gr] = ks + } + actualOutput := FromEncryptionState(grState) + expectedOutput := scenario.makeOutput(scenario.writeKeyIn, scenario.readKeysIn) + + if !cmp.Equal(expectedOutput, actualOutput.Resources) { + t.Fatal(fmt.Errorf("%s", cmp.Diff(expectedOutput, actualOutput.Resources))) + } + }) + } +} + +func keyToAESConfiguration(key *corev1.Secret) 
*apiserverconfigv1.AESConfiguration { + id, ok := state.NameToKeyID(key.Name) + if !ok { + panic(fmt.Sprintf("invalid test secret name %q", key.Name)) + } + return &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{ + { + Name: fmt.Sprintf("%d", id), + Secret: base64.StdEncoding.EncodeToString(key.Data[secrets.EncryptionSecretKeyDataKey]), + }, + }, + } +} + +func newFakeIdentityEncodedKeyForTest() string { + return "AAAAAAAAAAAAAAAAAAAAAA==" +} + +func newFakeIdentityKeyForTest() []byte { + return make([]byte, 16) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/secret.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/secret.go new file mode 100644 index 00000000000..0a75d5375b5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig/secret.go @@ -0,0 +1,74 @@ +package encryptionconfig + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + + "github.com/openshift/library-go/pkg/operator/encryption/state" +) + +var ( + apiserverScheme = runtime.NewScheme() + apiserverCodecs = serializer.NewCodecFactory(apiserverScheme) +) + +func init() { + utilruntime.Must(apiserverconfigv1.AddToScheme(apiserverScheme)) +} + +// EncryptionConfSecretName is the name of the final encryption config secret that is revisioned per apiserver rollout. +const EncryptionConfSecretName = "encryption-config" + +// EncryptionConfSecretKey is the map data key used to store the raw bytes of the final encryption config. 
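+//
+// A final encryption config secret therefore looks roughly like this (sketch;
+// the revision suffix is appended by the static pod machinery, see
+// RevisionLabelPodDeployer):
+//
+//	metadata:
+//	  name: encryption-config-<revision>
+//	  namespace: <target namespace>
+//	data:
+//	  encryption-config: <serialized apiserverconfigv1.EncryptionConfiguration>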
+const EncryptionConfSecretKey = "encryption-config"
+
+func FromSecret(encryptionConfigSecret *corev1.Secret) (*apiserverconfigv1.EncryptionConfiguration, error) {
+	data, ok := encryptionConfigSecret.Data[EncryptionConfSecretKey]
+	if !ok {
+		return nil, nil
+	}
+
+	decoder := apiserverCodecs.UniversalDecoder(apiserverconfigv1.SchemeGroupVersion)
+	encryptionConfigObj, err := runtime.Decode(decoder, data)
+	if err != nil {
+		return nil, err
+	}
+
+	encryptionConfig, ok := encryptionConfigObj.(*apiserverconfigv1.EncryptionConfiguration)
+	if !ok {
+		return nil, fmt.Errorf("unexpected type %T", encryptionConfigObj)
+	}
+	return encryptionConfig, nil
+}
+
+func ToSecret(ns, name string, encryptionCfg *apiserverconfigv1.EncryptionConfiguration) (*corev1.Secret, error) {
+	encoder := apiserverCodecs.LegacyCodec(apiserverconfigv1.SchemeGroupVersion)
+	rawEncryptionCfg, err := runtime.Encode(encoder, encryptionCfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to encode the encryption config: %v", err)
+	}
+
+	return &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: corev1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: ns,
+			Annotations: map[string]string{
+				state.KubernetesDescriptionKey: state.KubernetesDescriptionScaryValue,
+			},
+			Finalizers: []string{"encryption.apiserver.operator.openshift.io/deletion-protection"},
+		},
+		Data: map[string][]byte{
+			// EncryptionConfSecretKey and EncryptionConfSecretName share the same value;
+			// use the documented data key here to match FromSecret
+			EncryptionConfSecretKey: rawEncryptionCfg,
+		},
+	}, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/observer/observe_encryption_config.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/observer/observe_encryption_config.go
new file mode 100644
index 00000000000..f853c857468
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/observer/observe_encryption_config.go
@@ -0,0 +1,75 @@
+package observer
+
+import (
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	corev1lister "k8s.io/client-go/listers/core/v1"
+
+	"github.com/openshift/library-go/pkg/operator/configobserver"
+	"github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig"
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+type SecretsListers interface {
+	SecretLister() corev1lister.SecretLister
+}
+
+// NewEncryptionConfigObserver sets the encryption-provider-config flag to
+// /etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config
+// in the configuration file if the encryption-config secret in the targetNamespace is found.
+//
+// note:
+// the flag is not removed when the encryption-config secret is accidentally removed;
+// an active reconciliation loop is in place that will eventually synchronize the missing resource
+func NewEncryptionConfigObserver(targetNamespace string, encryptionConfFilePath string) configobserver.ObserveConfigFunc {
+	return func(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) {
+		encryptionConfigPath := []string{"apiServerArguments", "encryption-provider-config"}
+		listers := genericListers.(SecretsListers)
+		var errs []error
+		previouslyObservedConfig := map[string]interface{}{}
+
+		existingEncryptionConfig, _, err := unstructured.NestedStringSlice(existingConfig, encryptionConfigPath...)
+ if err != nil { + return previouslyObservedConfig, append(errs, err) + } + + if len(existingEncryptionConfig) > 0 { + if err := unstructured.SetNestedStringSlice(previouslyObservedConfig, existingEncryptionConfig, encryptionConfigPath...); err != nil { + errs = append(errs, err) + } + } + + previousEncryptionConfigFound := len(existingEncryptionConfig) > 0 + observedConfig := map[string]interface{}{} + + encryptionConfigSecret, err := listers.SecretLister().Secrets(targetNamespace).Get(encryptionconfig.EncryptionConfSecretName) + if errors.IsNotFound(err) { + // warn only if the encryption-provider-config flag was set before + if previousEncryptionConfigFound { + recorder.Warningf("ObserveEncryptionConfigNotFound", "encryption config secret %s/%s not found after encryption has been enabled", targetNamespace, encryptionconfig.EncryptionConfSecretName) + } + // encryption secret is optional so it doesn't prevent apiserver from running + // there is an active reconciliation loop in place that will eventually synchronize the missing resource + return previouslyObservedConfig, errs // do not append the not found error + } + if err != nil { + recorder.Warningf("ObserveEncryptionConfigGetErr", "failed to get encryption config secret %s/%s: %v", targetNamespace, encryptionconfig.EncryptionConfSecretName, err) + return previouslyObservedConfig, append(errs, err) + } + if len(encryptionConfigSecret.Data[encryptionconfig.EncryptionConfSecretKey]) == 0 { + recorder.Warningf("ObserveEncryptionConfigNoData", "encryption config secret %s/%s missing data", targetNamespace, encryptionconfig.EncryptionConfSecretName) + return previouslyObservedConfig, errs + } + + if err := unstructured.SetNestedStringSlice(observedConfig, []string{encryptionConfFilePath}, encryptionConfigPath...); err != nil { + recorder.Warningf("ObserveEncryptionConfigFailedSet", "failed setting encryption config: %v", err) + return previouslyObservedConfig, append(errs, err) + } + + if !equality.Semantic.DeepEqual(existingEncryptionConfig, []string{encryptionConfFilePath}) { + recorder.Eventf("ObserveEncryptionConfigChanged", "encryption config file changed from %s to %s", existingEncryptionConfig, encryptionConfFilePath) + } + + return observedConfig, errs + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/observer/observe_encryption_config_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/observer/observe_encryption_config_test.go new file mode 100644 index 00000000000..146504d9a65 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/observer/observe_encryption_config_test.go @@ -0,0 +1,178 @@ +package observer + +import ( + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + corelistersv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig" + encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing" + "github.com/openshift/library-go/pkg/operator/events" +) + +type secretsListers struct { + configobserver.Listers + + secretLister_ corelistersv1.SecretLister +} + +func (l secretsListers) SecretLister() corelistersv1.SecretLister { + return l.secretLister_ +} + +func TestEncryptionConfigObserver(t *testing.T) { + scenarios := []struct { + name string + input map[string]interface{} + initialResources 
[]runtime.Object
+
+		expectedOutput map[string]interface{}
+		expectedEvents []*corev1.Event
+	}{
+		// scenario 1
+		{
+			name: "a secret with encryption config exists, thus the encryption-provider-config flag is set",
+			initialResources: func() []runtime.Object {
+				ret := []runtime.Object{}
+				ec := encryptiontesting.CreateEncryptionCfgNoWriteKey("1", "NjFkZWY5NjRmYjk2N2Y1ZDdjNDRhMmFmOGRhYjY4NjU=", "secrets")
+				ecs, err := encryptionconfig.ToSecret("kms", "encryption-config", ec)
+				if err != nil {
+					t.Fatal(err)
+				}
+				ret = append(ret, ecs)
+				return ret
+			}(),
+			expectedOutput: func() map[string]interface{} {
+				ret := map[string]interface{}{}
+				ret["apiServerArguments"] = map[string]interface{}{
+					"encryption-provider-config": []interface{}{"/etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config"},
+				}
+				return ret
+			}(),
+			expectedEvents: []*corev1.Event{
+				{Reason: "ObserveEncryptionConfigChanged", Message: "encryption config file changed from [] to /etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config"},
+			},
+		},
+
+		// scenario 2
+		{
+			name:           "no secret with encryption config exists, thus no encryption-provider-config flag is set",
+			expectedOutput: map[string]interface{}{},
+			expectedEvents: []*corev1.Event{}, // we expect no events
+		},
+
+		// scenario 3
+		{
+			name: "encryption-provider-config flag was set in the past but the secret with encryption config is missing",
+			input: func() map[string]interface{} {
+				ret := map[string]interface{}{}
+				ret["apiServerArguments"] = map[string]interface{}{
+					"encryption-provider-config": []interface{}{"/etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config"},
+				}
+				return ret
+			}(),
+			expectedOutput: func() map[string]interface{} {
+				ret := map[string]interface{}{}
+				ret["apiServerArguments"] = map[string]interface{}{
+					"encryption-provider-config": []interface{}{"/etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config"},
+				}
+				return ret
+			}(),
+			expectedEvents: []*corev1.Event{
+				{Reason: "ObserveEncryptionConfigNotFound", Message: "encryption config secret kms/encryption-config not found after encryption has been enabled"},
+			},
+		},
+
+		// scenario 4
+		{
+			name: "warn about encryption-provider-config value change",
+			initialResources: func() []runtime.Object {
+				ret := []runtime.Object{}
+				ec := encryptiontesting.CreateEncryptionCfgNoWriteKey("1", "NjFkZWY5NjRmYjk2N2Y1ZDdjNDRhMmFmOGRhYjY4NjU=", "secrets")
+				ecs, err := encryptionconfig.ToSecret("kms", "encryption-config", ec)
+				if err != nil {
+					t.Fatal(err)
+				}
+				ret = append(ret, ecs)
+				return ret
+			}(),
+			input: func() map[string]interface{} {
+				ret := map[string]interface{}{}
+				ret["apiServerArguments"] = map[string]interface{}{
+					"encryption-provider-config": []interface{}{"some_path"},
+				}
+				return ret
+			}(),
+			expectedOutput: func() map[string]interface{} {
+				ret := map[string]interface{}{}
+				ret["apiServerArguments"] = map[string]interface{}{
+					"encryption-provider-config": []interface{}{"/etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config"},
+				}
+				return ret
+			}(),
+			expectedEvents: []*corev1.Event{
+				{Reason: "ObserveEncryptionConfigChanged", Message: "encryption config file changed from [some_path] to /etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config"},
+			},
+		},
+	}
+
+	for _, scenario := range scenarios {
+		t.Run(scenario.name, func(t *testing.T) {
+			listers := secretsListers{}
+			{
+				indexer :=
cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + for _, obj := range scenario.initialResources { + err := indexer.Add(obj) + if err != nil { + t.Fatal(err) + } + } + listers.secretLister_ = corelistersv1.NewSecretLister(indexer) + } + eventRec := events.NewInMemoryRecorder("encryption-config-observer") + + target := NewEncryptionConfigObserver("kms", "/etc/kubernetes/static-pod-resources/secrets/encryption-config/encryption-config") + result, err := target(listers, eventRec, scenario.input) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(result, scenario.expectedOutput) { + t.Fatal(fmt.Errorf("%s", cmp.Diff(result, scenario.expectedOutput))) + } + + // validate events + { + recordedEvents := eventRec.Events() + if len(scenario.expectedEvents) != len(recordedEvents) { + t.Fatalf("expected to observe %d events but got %d", len(scenario.expectedEvents), len(recordedEvents)) + } + + for _, recordedEvent := range recordedEvents { + expectedEvent := recordedEvent.DeepCopy() + recordedEventFound := false + + for _, expectedEventShort := range scenario.expectedEvents { + expectedEvent.Message = expectedEventShort.Message + expectedEvent.Reason = expectedEventShort.Reason + if cmp.Equal(expectedEvent, recordedEvent) { + recordedEventFound = true + break + } + } + + if !recordedEventFound { + t.Fatalf("expected event with reason = %q and message %q wasn't found\n recorded events = %v", expectedEvent.Reason, expectedEvent.Message, recordedEvents) + } + } + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/secrets.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/secrets.go new file mode 100644 index 00000000000..a2804e5b89f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/secrets.go @@ -0,0 +1,138 @@ +package secrets + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/encryption/state" +) + +// ToKeyState converts a key secret to a key state. 
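+//
+// A minimal sketch of an input secret this function accepts (the component
+// "kms" and keyID 1 are illustrative; FromKeyState below produces exactly this
+// shape, and key32 stands in for the raw key bytes):
+//
+//	&corev1.Secret{
+//		ObjectMeta: metav1.ObjectMeta{
+//			Name:      "encryption-key-kms-1",
+//			Namespace: "openshift-config-managed",
+//			Labels:    map[string]string{EncryptionKeySecretsLabel: "kms"},
+//			Annotations: map[string]string{
+//				encryptionSecretMode: "aescbc",
+//			},
+//		},
+//		Data: map[string][]byte{EncryptionSecretKeyDataKey: key32},
+//	}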
+func ToKeyState(s *corev1.Secret) (state.KeyState, error) {
+	data := s.Data[EncryptionSecretKeyDataKey]
+
+	keyID, validKeyID := state.NameToKeyID(s.Name)
+	if !validKeyID {
+		return state.KeyState{}, fmt.Errorf("secret %s/%s has an invalid name", s.Namespace, s.Name)
+	}
+
+	key := state.KeyState{
+		Key: apiserverconfigv1.Key{
+			// we use keyID as the name to limit the length of the field as it is used as a prefix for every value in etcd
+			Name:   strconv.FormatUint(keyID, 10),
+			Secret: base64.StdEncoding.EncodeToString(data),
+		},
+		Backed: true,
+	}
+
+	if v, ok := s.Annotations[EncryptionSecretMigratedTimestamp]; ok {
+		ts, err := time.Parse(time.RFC3339, v)
+		if err != nil {
+			return state.KeyState{}, fmt.Errorf("secret %s/%s has invalid %s annotation: %v", s.Namespace, s.Name, EncryptionSecretMigratedTimestamp, err)
+		}
+		key.Migrated.Timestamp = ts
+	}
+
+	if v, ok := s.Annotations[EncryptionSecretMigratedResources]; ok && len(v) > 0 {
+		migrated := &MigratedGroupResources{}
+		if err := json.Unmarshal([]byte(v), migrated); err != nil {
+			return state.KeyState{}, fmt.Errorf("secret %s/%s has invalid %s annotation: %v", s.Namespace, s.Name, EncryptionSecretMigratedResources, err)
+		}
+		key.Migrated.Resources = migrated.Resources
+	}
+
+	if v, ok := s.Annotations[encryptionSecretInternalReason]; ok && len(v) > 0 {
+		key.InternalReason = v
+	}
+	if v, ok := s.Annotations[encryptionSecretExternalReason]; ok && len(v) > 0 {
+		key.ExternalReason = v
+	}
+
+	keyMode := state.Mode(s.Annotations[encryptionSecretMode])
+	switch keyMode {
+	case state.AESCBC, state.SecretBox, state.Identity:
+		key.Mode = keyMode
+	default:
+		return state.KeyState{}, fmt.Errorf("secret %s/%s has invalid mode: %s", s.Namespace, s.Name, keyMode)
+	}
+	if keyMode != state.Identity && len(data) == 0 {
+		return state.KeyState{}, fmt.Errorf("secret %s/%s of mode %q must have non-empty key", s.Namespace, s.Name, keyMode)
+	}
+
+	return key, nil
+}
+
+// FromKeyState converts a key state to a key secret.
+func FromKeyState(component string, ks state.KeyState) (*corev1.Secret, error) {
+	bs, err := base64.StdEncoding.DecodeString(ks.Key.Secret)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode key string: %v", err)
+	}
+
+	s := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("encryption-key-%s-%s", component, ks.Key.Name),
+			Namespace: "openshift-config-managed",
+			Labels: map[string]string{
+				EncryptionKeySecretsLabel: component,
+			},
+			Annotations: map[string]string{
+				state.KubernetesDescriptionKey: state.KubernetesDescriptionScaryValue,
+
+				encryptionSecretMode:           string(ks.Mode),
+				encryptionSecretInternalReason: ks.InternalReason,
+				encryptionSecretExternalReason: ks.ExternalReason,
+			},
+			Finalizers: []string{EncryptionSecretFinalizer},
+		},
+		Data: map[string][]byte{
+			EncryptionSecretKeyDataKey: bs,
+		},
+	}
+
+	if !ks.Migrated.Timestamp.IsZero() {
+		s.Annotations[EncryptionSecretMigratedTimestamp] = ks.Migrated.Timestamp.Format(time.RFC3339)
+	}
+	if len(ks.Migrated.Resources) > 0 {
+		migrated := MigratedGroupResources{Resources: ks.Migrated.Resources}
+		bs, err := json.Marshal(migrated)
+		if err != nil {
+			return nil, err
+		}
+		s.Annotations[EncryptionSecretMigratedResources] = string(bs)
+	}
+
+	return s, nil
+}
+
+// HasResource returns whether the given group resource is contained in the migrated group resource list.
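+//
+// For example (a quick sketch):
+//
+//	m := &MigratedGroupResources{Resources: []schema.GroupResource{{Resource: "secrets"}}}
+//	m.HasResource(schema.GroupResource{Resource: "secrets"})    // true
+//	m.HasResource(schema.GroupResource{Resource: "configmaps"}) // false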
+func (m *MigratedGroupResources) HasResource(resource schema.GroupResource) bool { + for _, gr := range m.Resources { + if gr == resource { + return true + } + } + return false +} + +// ListKeySecrets returns the current key secrets from openshift-config-managed. +func ListKeySecrets(secretClient corev1client.SecretsGetter, encryptionSecretSelector metav1.ListOptions) ([]*corev1.Secret, error) { + encryptionSecretList, err := secretClient.Secrets("openshift-config-managed").List(encryptionSecretSelector) + if err != nil { + return nil, err + } + var encryptionSecrets []*corev1.Secret + for i := range encryptionSecretList.Items { + encryptionSecrets = append(encryptionSecrets, &encryptionSecretList.Items[i]) + } + return encryptionSecrets, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/secrets_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/secrets_test.go new file mode 100644 index 00000000000..9eb97cd83af --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/secrets_test.go @@ -0,0 +1,96 @@ +package secrets + +import ( + "encoding/base64" + "reflect" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime/schema" + v1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/utils/diff" + + "github.com/openshift/library-go/pkg/operator/encryption/state" +) + +func TestRoundtrip(t *testing.T) { + now, _ := time.Parse(time.RFC3339, time.Now().Format(time.RFC3339)) + + tests := []struct { + name string + component string + ks state.KeyState + }{ + { + name: "full aescbc", + component: "kms", + ks: state.KeyState{ + Key: v1.Key{ + Name: "54", + Secret: base64.StdEncoding.EncodeToString([]byte("abcdef")), + }, + Backed: true, // this will be set by ToKeyState() + Mode: "aescbc", + Migrated: state.MigrationState{ + Timestamp: now, + Resources: []schema.GroupResource{ + {Resource: "secrets"}, + {Resource: "configmaps"}, + {Group: "networking.openshift.io", Resource: "routes"}, + }, + }, + InternalReason: "internal", + ExternalReason: "external", + }, + }, + { + name: "sparse aescbc", + component: "kms", + ks: state.KeyState{ + Key: v1.Key{ + Name: "54", + Secret: base64.StdEncoding.EncodeToString([]byte("abcdef")), + }, + Backed: true, // this will be set by ToKeyState() + Mode: "aescbc", + }, + }, + { + name: "identity", + component: "kms", + ks: state.KeyState{ + Key: v1.Key{ + Name: "54", + Secret: "", + }, + Backed: true, // this will be set by ToKeyState() + Mode: "identity", + Migrated: state.MigrationState{ + Timestamp: now, + Resources: []schema.GroupResource{ + {Resource: "secrets"}, + {Resource: "configmaps"}, + {Group: "networking.openshift.io", Resource: "routes"}, + }, + }, + InternalReason: "internal", + ExternalReason: "external", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := FromKeyState(tt.component, tt.ks) + if err != nil { + t.Fatalf("unexpected FromKeyState() error: %v", err) + } + got, err := ToKeyState(s) + if err != nil { + t.Fatalf("unexpected ToKeyState() error: %v", err) + } + if !reflect.DeepEqual(got, tt.ks) { + t.Errorf("roundtrip error:\n%s", diff.ObjectDiff(tt.ks, got)) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/types.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/types.go new file mode 100644 index 00000000000..7161e4a1249 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/secrets/types.go @@ -0,0 
+1,59 @@
+package secrets
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+const (
+	// This label is used to find secrets that build up the final encryption config. The names of the
+	// secrets are in the format <shared prefix>-<unique monotonically increasing uint> (the uint is the keyID).
+	// For example, openshift-kube-apiserver-encryption-3. Note that other than the -3 postfix, the name of
+	// the secret is irrelevant since the label is used to find the secrets. Of course the key minting
+	// controller cares about the entire name since it needs to know when it has already created a secret for a given
+	// keyID, meaning it cannot just use a random prefix. As such the name must include the data that is contained
+	// within the label. Thus the format used is <component>-encryption-<keyID>. This keeps everything distinct
+	// and fully deterministic. The keys are ordered by keyID where a smaller ID means an earlier key.
+	// This means that the latest secret (the one with the largest keyID) is the current desired write key.
+	EncryptionKeySecretsLabel = "encryption.apiserver.operator.openshift.io/component"
+
+	// These annotations are used to mark the current observed state of a secret.
+
+	// The time (in RFC3339 format) at which the migrated state observation occurred. The key minting
+	// controller parses this field to determine if enough time has passed and a new key should be created.
+	EncryptionSecretMigratedTimestamp = "encryption.apiserver.operator.openshift.io/migrated-timestamp"
+	// The list of resources that were migrated when encryptionSecretMigratedTimestamp was set.
+	// See the MigratedGroupResources struct below to understand the JSON encoding used.
+	EncryptionSecretMigratedResources = "encryption.apiserver.operator.openshift.io/migrated-resources"
+
+	// encryptionSecretMode is the annotation that determines how the provider associated with a given key is
+	// configured. For example, a key could be used with AES-CBC or Secretbox. This allows for algorithm
+	// agility. When the default mode used by the key minting controller changes, it will force the creation
+	// of a new key under the new mode even if encryptionSecretMigrationInterval has not been reached.
+	encryptionSecretMode = "encryption.apiserver.operator.openshift.io/mode"
+
+	// encryptionSecretInternalReason is the annotation that denotes why a particular key
+	// was created based on "internal" reasons (i.e. key minting controller decided a new
+	// key was needed for some reason X). It is tracked solely for the purposes of debugging.
+	encryptionSecretInternalReason = "encryption.apiserver.operator.openshift.io/internal-reason"
+
+	// encryptionSecretExternalReason is the annotation that denotes why a particular key was created based on
+	// "external" reasons (i.e. force key rotation for some reason Y). It allows the key minting controller to
+	// determine if a new key should be created even if encryptionSecretMigrationInterval has not been reached.
+	encryptionSecretExternalReason = "encryption.apiserver.operator.openshift.io/external-reason"
+
+	// In the data field of the secret API object, this (map) key is used to hold the actual encryption key
+	// (i.e. for AES-CBC mode the value associated with this map key is 32 bytes of random noise).
+	EncryptionSecretKeyDataKey = "encryption.apiserver.operator.openshift.io-key"
+
+	// EncryptionSecretFinalizer is a finalizer attached to all secrets generated
+	// by the encryption controllers. Its sole purpose is to prevent the accidental
+	// deletion of secrets by enforcing a two phase delete.
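+	// With the finalizer in place, a delete request only sets deletionTimestamp;
+	// the secret is not actually removed until a controller strips the finalizer,
+	// along the lines of this sketch (secretClient is an assumed
+	// corev1client.SecretInterface; error handling omitted):
+	//
+	//	s.Finalizers = []string{} // drop EncryptionSecretFinalizer
+	//	secretClient.Update(s)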
+	EncryptionSecretFinalizer = "encryption.apiserver.operator.openshift.io/deletion-protection"
+)
+
+// MigratedGroupResources is the data structure stored in the
+// encryption.apiserver.operator.openshift.io/migrated-resources
+// annotation of a key secret.
+type MigratedGroupResources struct {
+	Resources []schema.GroupResource `json:"resources"`
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/state/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/state/helpers.go
new file mode 100644
index 00000000000..656aeafef6f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/state/helpers.go
@@ -0,0 +1,84 @@
+package state
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// MigratedFor returns whether all given resources are marked as migrated in the given key.
+// It returns the missing GRs and a reason if that's not the case.
+func MigratedFor(grs []schema.GroupResource, km KeyState) (ok bool, missing []schema.GroupResource, reason string) {
+	var missingStrings []string
+	for _, gr := range grs {
+		found := false
+		for _, mgr := range km.Migrated.Resources {
+			if mgr == gr {
+				found = true
+				break
+			}
+		}
+		if !found {
+			missing = append(missing, gr)
+			missingStrings = append(missingStrings, gr.String())
+		}
+	}
+
+	if len(missing) > 0 {
+		return false, missing, fmt.Sprintf("key ID %s is missing resources %s among its migrated resources", km.Key.Name, strings.Join(missingStrings, ","))
+	}
+
+	return true, nil, ""
+}
+
+// KeysWithPotentiallyPersistedDataAndNextReadKey returns the minimal list of recent keys
+// which together have migrated all given GRs, plus one more read key.
+func KeysWithPotentiallyPersistedDataAndNextReadKey(grs []schema.GroupResource, recentFirstSortedKeys []KeyState) []KeyState {
+	for i, k := range recentFirstSortedKeys {
+		if allMigrated, missing, _ := MigratedFor(grs, k); allMigrated {
+			if i+1 < len(recentFirstSortedKeys) {
+				return recentFirstSortedKeys[:i+2]
+			} else {
+				return recentFirstSortedKeys[:i+1]
+			}
+		} else {
+			// continue with keys we haven't found a migration key for yet
+			grs = missing
+		}
+	}
+	return recentFirstSortedKeys
+}
+
+func SortRecentFirst(unsorted []KeyState) []KeyState {
+	ret := make([]KeyState, len(unsorted))
+	copy(ret, unsorted)
+	sort.Slice(ret, func(i, j int) bool {
+		// it is fine to ignore the validKeyID bool here because secrets with
+		// invalid key IDs are filtered out before this function is called
+		iKeyID, _ := NameToKeyID(ret[i].Key.Name)
+		jKeyID, _ := NameToKeyID(ret[j].Key.Name)
+		return iKeyID > jKeyID
+	})
+	return ret
+}
+
+func NameToKeyID(name string) (uint64, bool) {
+	lastIdx := strings.LastIndex(name, "-")
+	idString := name
+	if lastIdx >= 0 {
+		idString = name[lastIdx+1:] // this can never overflow since str[-1+1:] is always valid
+	}
+	id, err := strconv.ParseUint(idString, 10, 0)
+	return id, err == nil
+}
+
+func EqualKeyAndEqualID(s1, s2 *KeyState) bool {
+	if s1.Mode != s2.Mode || s1.Key.Secret != s2.Key.Secret {
+		return false
+	}
+
+	id1, valid1 := NameToKeyID(s1.Key.Name)
+	id2, valid2 := NameToKeyID(s2.Key.Name)
+	return valid1 && valid2 && id1 == id2
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/state/types.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/state/types.go
new file mode 100644
index 00000000000..9a7174e43c2
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/state/types.go
@@ -0,0 +1,69 @@
+package state
+
+import (
+	"time"
+
"k8s.io/apimachinery/pkg/runtime/schema" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" +) + +// These annotations try to scare anyone away from editing the encryption secrets. It is trivial for +// an external actor to break the invariants of the state machine and render the cluster unrecoverable. +const ( + KubernetesDescriptionKey = "kubernetes.io/description" + KubernetesDescriptionScaryValue = `WARNING: DO NOT EDIT. +Altering of the encryption secrets will render you cluster inaccessible. +Catastrophic data loss can occur from the most minor changes.` +) + +// GroupResourceState represents, for a single group resource, the write and read keys in a +// format that can be directly translated to and from the on disk EncryptionConfiguration object. +type GroupResourceState struct { + // the write key of the group resource. + WriteKey KeyState + // all read keys of the group resource. Potentially includes the write key. + ReadKeys []KeyState +} + +func (k GroupResourceState) HasWriteKey() bool { + return len(k.WriteKey.Key.Name) > 0 && len(k.WriteKey.Key.Secret) > 0 +} + +type KeyState struct { + Key apiserverconfigv1.Key + Mode Mode + + // described whether it is backed by a secret. + Backed bool + Migrated MigrationState + // some controller logic caused this secret to be created by the key controller. + InternalReason string + // the user via unsupportConfigOverrides.encryption.reason triggered this key. + ExternalReason string +} + +type MigrationState struct { + // the timestamp fo the last migration + Timestamp time.Time + // the resources that were migrated at some point in time to this key. + Resources []schema.GroupResource +} + +// Mode is the value associated with the encryptionSecretMode annotation +type Mode string + +// The current set of modes that are supported along with the default Mode that is used. +// These values are encoded into the secret and thus must not be changed. +// Strings are used over iota because they are easier for a human to understand. +const ( + AESCBC Mode = "aescbc" // available from the first release, see defaultMode below + SecretBox Mode = "secretbox" // available from the first release, see defaultMode below + Identity Mode = "identity" // available from the first release, see defaultMode below + + // Changing this value requires caution to not break downgrades. + // Specifically, if some new Mode is released in version X, that new Mode cannot + // be used as the defaultMode until version X+1. Thus on a downgrade the operator + // from version X will still be able to honor the observed encryption state + // (and it will do a key rotation to force the use of the old defaultMode). 
+	DefaultMode = Identity // we default to encryption being disabled for now
+)
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/statemachine/transition.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/statemachine/transition.go
new file mode 100644
index 00000000000..4b94ff045f9
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/statemachine/transition.go
@@ -0,0 +1,201 @@
+package statemachine
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog"
+
+	"github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig"
+	"github.com/openshift/library-go/pkg/operator/encryption/secrets"
+	"github.com/openshift/library-go/pkg/operator/encryption/state"
+)
+
+// Deployer abstracts the deployment mechanism, like the static pod controllers.
+type Deployer interface {
+	// DeployedEncryptionConfigSecret returns the deployed encryption config and whether all
+	// instances of the operand have acknowledged it.
+	DeployedEncryptionConfigSecret() (secret *corev1.Secret, converged bool, err error)
+
+	// AddEventHandler registers an event handler for changes to the backing resources
+	// that might influence the result of DeployedEncryptionConfigSecret.
+	AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced
+}
+
+func GetEncryptionConfigAndState(
+	deployer Deployer,
+	secretClient corev1client.SecretsGetter,
+	encryptionSecretSelector metav1.ListOptions,
+	encryptedGRs []schema.GroupResource,
+) (current *apiserverconfigv1.EncryptionConfiguration, desired map[schema.GroupResource]state.GroupResourceState, encryptionSecrets []*corev1.Secret, transitioningReason string, err error) {
+	// get current config
+	encryptionConfigSecret, converged, err := deployer.DeployedEncryptionConfigSecret()
+	if err != nil {
+		return nil, nil, nil, "", err
+	}
+	if !converged {
+		return nil, nil, nil, "APIServerRevisionNotConverged", nil
+	}
+	var encryptionConfig *apiserverconfigv1.EncryptionConfiguration
+	if encryptionConfigSecret != nil {
+		encryptionConfig, err = encryptionconfig.FromSecret(encryptionConfigSecret)
+		if err != nil {
+			return nil, nil, nil, "", fmt.Errorf("invalid encryption config %s/%s: %v", encryptionConfigSecret.Namespace, encryptionConfigSecret.Name, err)
+		}
+	}
+
+	// compute desired config
+	encryptionSecrets, err = secrets.ListKeySecrets(secretClient, encryptionSecretSelector)
+	if err != nil {
+		return nil, nil, nil, "", err
+	}
+	desiredEncryptionState := getDesiredEncryptionState(encryptionConfig, encryptionSecrets, encryptedGRs)
+
+	return encryptionConfig, desiredEncryptionState, encryptionSecrets, "", nil
+}
+
+// getDesiredEncryptionState returns the desired state of encryption for all resources.
+// To do this it compares the current state against the available secrets and to-be-encrypted resources.
+// oldEncryptionConfig can be nil if there is no config yet.
+// If there are no secrets, identity is set as the write key for all resources.
+// It is assumed that encryptionSecrets are all valid.
+//
+// The basic rules are:
+//
+// 1. don't do anything if there are no key secrets.
+// 2. every GR must have all the read-keys (existing as secrets) since the last complete migration.
+// 3. if (2) is the case, the write-key must be the most recent key.
+// 4. if (2) and (3) are the case, all non-write keys should be removed.
+func getDesiredEncryptionState(oldEncryptionConfig *apiserverconfigv1.EncryptionConfiguration, encryptionSecrets []*corev1.Secret, toBeEncryptedGRs []schema.GroupResource) map[schema.GroupResource]state.GroupResourceState {
+	//
+	// STEP 0: start with the old encryption config, and alter it towards the desired state in the following STEPs.
+	//
+	desiredEncryptionState, backedKeys := encryptionconfig.ToEncryptionState(oldEncryptionConfig, encryptionSecrets)
+	if desiredEncryptionState == nil {
+		desiredEncryptionState = make(map[schema.GroupResource]state.GroupResourceState, len(toBeEncryptedGRs))
+	}
+
+	// add new resources without keys. These resources will trigger STEP 2.
+	oldEncryptedGRs := make([]schema.GroupResource, 0, len(desiredEncryptionState))
+	for _, gr := range toBeEncryptedGRs {
+		if _, ok := desiredEncryptionState[gr]; !ok {
+			desiredEncryptionState[gr] = state.GroupResourceState{}
+		} else {
+			oldEncryptedGRs = append(oldEncryptedGRs, gr)
+		}
+	}
+
+	//
+	// STEP 1: without secrets, wait for the key controller to create one
+	//
+	// the code after this point assumes at least one secret
+	if len(backedKeys) == 0 {
+		klog.V(4).Infof("no encryption secrets found")
+		return desiredEncryptionState
+	}
+
+	//
+	// STEP 2: verify that all necessary read-keys are present. If not, add them, deploy and wait for stability.
+	//
+	// Note: we never drop keys here. Dropping only happens in STEP 4.
+	// Note: only keys with potentially persisted data are considered. There might be more which are not pruned yet by the pruning controller.
+	//
+	// TODO: allow removing resources (e.g. on downgrades) and transition back to identity.
+	allReadSecretsAsExpected := true
+	currentlyEncryptedGRs := oldEncryptedGRs
+	if oldEncryptionConfig == nil {
+		// if the config is not there, we assume it was deleted. Assume the worst case when finding
+		// potentially persisted data keys.
+		currentlyEncryptedGRs = toBeEncryptedGRs
+	}
+	expectedReadSecrets := state.KeysWithPotentiallyPersistedDataAndNextReadKey(currentlyEncryptedGRs, backedKeys)
+	for gr, grState := range desiredEncryptionState {
+		changed := false
+		for _, expected := range expectedReadSecrets {
+			found := false
+			for _, rk := range grState.ReadKeys {
+				if state.EqualKeyAndEqualID(&rk, &expected) {
+					found = true
+					break
+				}
+			}
+			if !found {
+				// Just adding the raw key without trusting any metadata on it
+				grState.ReadKeys = state.SortRecentFirst(append(grState.ReadKeys, expected)) // sort into the right position
+				changed = true
+				allReadSecretsAsExpected = false
+				klog.V(4).Infof("encrypted resource %s is missing read key %s", gr, expected.Key.Name)
+			}
+		}
+		if changed {
+			grState.ReadKeys = state.SortRecentFirst(grState.ReadKeys)
+			desiredEncryptionState[gr] = grState
+		}
+
+		// the potential write-key must be backed. Otherwise stop here in STEP 2 and let the key controller create a new key.
+		if !grState.ReadKeys[0].Backed {
+			allReadSecretsAsExpected = false
+		}
+	}
+	if !allReadSecretsAsExpected {
+		klog.V(4).Infof("not all read secrets in sync")
+		return desiredEncryptionState
+	}
+
+	//
+	// STEP 3: with consistent read-keys, verify the first read-key is the write-key. If not, set the write-key and wait for stability.
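+	// For orientation: in the rendered EncryptionConfiguration the write key is
+	// simply the first provider in each resource's provider list, e.g.
+	// [aescbc(key-2), aescbc(key-1), identity] means key-2 encrypts new data
+	// while key-2, key-1 and identity can all be used to decrypt existing data.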
+ // + writeKey := backedKeys[0] + allWriteSecretsAsExpected := true + for gr, grState := range desiredEncryptionState { + if !grState.HasWriteKey() || !state.EqualKeyAndEqualID(&grState.WriteKey, &writeKey) { + allWriteSecretsAsExpected = false + klog.V(4).Infof("encrypted resource %s does not have write key %s", gr, writeKey.Key.Name) + break + } + } + if !allWriteSecretsAsExpected { + klog.V(4).Infof("not all write secrets in sync") + for gr := range desiredEncryptionState { + grState := desiredEncryptionState[gr] + grState.WriteKey = writeKey + desiredEncryptionState[gr] = grState + } + return desiredEncryptionState + } + + // + // STEP 4: with consistent read-keys and write-keys, remove every read-key other than the write-key and one last read key. + // + // Note: because read-keys are consistent, currentlyEncryptedGRs equals toBeEncryptedGRs + allMigrated, _, reason := state.MigratedFor(currentlyEncryptedGRs, writeKey) + if !allMigrated { + klog.V(4).Infof(reason) + return desiredEncryptionState + } + for gr := range desiredEncryptionState { + grState := desiredEncryptionState[gr] + + // cut down read keys to all expected read keys, and everything in between + if len(expectedReadSecrets) == 0 { + grState.ReadKeys = []state.KeyState{} + } else { + lastExpected := expectedReadSecrets[len(expectedReadSecrets)-1] + for i, rk := range grState.ReadKeys { + if state.EqualKeyAndEqualID(&rk, &lastExpected) { + grState.ReadKeys = grState.ReadKeys[:i+1] + break + } + } + } + + desiredEncryptionState[gr] = grState + } + klog.V(4).Infof("write key %s set as sole write key", writeKey.Key.Name) + return desiredEncryptionState +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/statemachine/transition_test.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/statemachine/transition_test.go new file mode 100644 index 00000000000..d9af7d94bea --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/statemachine/transition_test.go @@ -0,0 +1,1010 @@ +package statemachine + +import ( + "encoding/base64" + "reflect" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + "k8s.io/utils/diff" + + "github.com/openshift/library-go/pkg/operator/encryption/encryptionconfig" + "github.com/openshift/library-go/pkg/operator/encryption/state" + encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing" +) + +func TestGetDesiredEncryptionState(t *testing.T) { + type args struct { + oldEncryptionConfig *apiserverconfigv1.EncryptionConfiguration + targetNamespace string + encryptionSecrets []*corev1.Secret + toBeEncryptedGRs []schema.GroupResource + } + type ValidateState func(ts *testing.T, args *args, state map[schema.GroupResource]state.GroupResourceState) + + equalsConfig := func(expected *apiserverconfigv1.EncryptionConfiguration) func(ts *testing.T, args *args, state map[schema.GroupResource]state.GroupResourceState) { + return func(ts *testing.T, _ *args, state map[schema.GroupResource]state.GroupResourceState) { + if expected == nil && state != nil { + ts.Errorf("expected nil state, got: %#v", state) + return + } + if expected != nil && state == nil { + ts.Errorf("expected non-nil state corresponding to config %#v", expected) + return + } + if expected == nil && state == nil { + return + } + expected := expected.DeepCopy() + expected.TypeMeta = metav1.TypeMeta{} + 
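+			// FromEncryptionState is used here as (effectively) the inverse of the
+			// ToEncryptionState call in the production code: it renders the desired
+			// state map back into an apiserverconfigv1.EncryptionConfiguration so
+			// the two configs can be compared structurally.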
encryptionConfig := encryptionconfig.FromEncryptionState(state) + if !reflect.DeepEqual(expected, encryptionConfig) { + ts.Errorf("unexpected encryption config (A: expected, B: got):\n%s", diff.ObjectDiff(expected, encryptionConfig)) + } + } + } + + outputMatchingInputConfig := func(ts *testing.T, args *args, state map[schema.GroupResource]state.GroupResourceState) { + equalsConfig(args.oldEncryptionConfig)(ts, args, state) + } + + tests := []struct { + name string + args args + validate ValidateState + }{ + { + "first run: no config, no secrets => nothing done, state with identities for each resource", + args{ + nil, + "kms", + nil, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + }), + }, + { + "config exists without write keys, no secrets => nothing done, config unchanged", + args{ + encryptiontesting.CreateEncryptionCfgNoWriteKey("1", "NzFlYTdjOTE0MTlhNjhmZDEyMjRmODhkNTAzMTZiNGU=", "configmaps", "secrets"), + "kms", + nil, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + outputMatchingInputConfig, + }, + { + "config exists with write keys, no secrets => nothing done, config unchanged", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + nil, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}}), + }, + { + "config exists with only one resource => 2nd resource is added", + args{ + 
&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }}, + }, + }, + }), + }, + { + "config exists with two resources => 2nd resource stays", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + 
}), + }, + { + "no config, secrets exist => first config is created", + args{ + nil, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("71ea7c91419a68fd1224f88d50316b4e")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("71ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }}, + }, + }}), + }, + { + "no config, multiple secrets exists, some migrated => config is recreated, with identity as write key", + args{ + nil, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 5, []byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}}, 4, []byte("447907494bßc4897b876c8476bf807bc"), time.Now()), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, 3, []byte("3cbfbe7d76876e076b076c659cd895ff"), time.Now()), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}}, 2, []byte("2b234b23cb23c4b2cb24cb24bcbffbca")), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, 1, []byte("11ea7c91419a68fd1224f88d50316b4a"), time.Now()), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "4", + Secret: base64.StdEncoding.EncodeToString([]byte("447907494bßc4897b876c8476bf807bc")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3cbfbe7d76876e076b076c659cd895ff")), + }}, + }, + }, { + // one more read key for backup/recovery + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2b234b23cb23c4b2cb24cb24bcbffbca")), + }}, + }, + }}, + }, + { + Resources: []string{"secrets"}, + 
Providers: []apiserverconfigv1.ProviderConfiguration{{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "4", + Secret: base64.StdEncoding.EncodeToString([]byte("447907494bßc4897b876c8476bf807bc")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3cbfbe7d76876e076b076c659cd895ff")), + }}, + }, + }, { + // one more read key for backup/recovery + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2b234b23cb23c4b2cb24cb24bcbffbca")), + }}, + }, + }}, + }, + }}), + }, + { + "config exists, write key secret is missing => no-op", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "4", + Secret: base64.StdEncoding.EncodeToString([]byte("447907494bßc4897b876c8476bf807bc")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3cbfbe7d76876e076b076c659cd895ff")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "4", + Secret: base64.StdEncoding.EncodeToString([]byte("447907494bßc4897b876c8476bf807bc")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3cbfbe7d76876e076b076c659cd895ff")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }}, + "kms", + []*corev1.Secret{ + // missing: encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 5, []byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}}, 4, []byte("447907494bßc4897b876c8476bf807bc"), time.Now()), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, 3, []byte("3cbfbe7d76876e076b076c659cd895ff"), time.Now()), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}}, 2, []byte("2b234b23cb23c4b2cb24cb24bcbffbca")), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", 
Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, 1, []byte("11ea7c91419a68fd1224f88d50316b4a"), time.Now()), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + // 4 is becoming new write key, not 5! + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "4", + Secret: base64.StdEncoding.EncodeToString([]byte("447907494bßc4897b876c8476bf807bc")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3cbfbe7d76876e076b076c659cd895ff")), + }}, + }, + }, { + // one more read key for backup/recovery + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2b234b23cb23c4b2cb24cb24bcbffbca")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "4", + Secret: base64.StdEncoding.EncodeToString([]byte("447907494bßc4897b876c8476bf807bc")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3cbfbe7d76876e076b076c659cd895ff")), + }}, + }, + }, { + // one more read key for backup/recovery + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2b234b23cb23c4b2cb24cb24bcbffbca")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }}), + }, + { + "config exists without identity => identity is appended", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }}, + }, + }}, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 5, []byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: 
[]apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "5", + Secret: base64.StdEncoding.EncodeToString([]byte("55b5bcbc85cb857c7c07c56c54983cbcd")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + }), + }, + { + "config exists, new key secret => new key added as read key", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("11ea7c91419a68fd1224f88d50316b4e")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 2, []byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + }), + }, + { + "config exists, read keys are consistent => new write key is set", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + 
Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("11ea7c91419a68fd1224f88d50316b4e")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 2, []byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + }), + }, + { + "config exists, read+write keys are consistent, not migrated => nothing changes", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: 
[]apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("11ea7c91419a68fd1224f88d50316b4e")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 2, []byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("2bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + }), + }, + { + "config exists, read+write keys are consistent, migrated => old read-keys are pruned from config", + args{ + &apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{{ + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("21ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: 
base64.StdEncoding.EncodeToString([]byte("21ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "1", + Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }}, + }, + "kms", + []*corev1.Secret{ + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 1, []byte("11ea7c91419a68fd1224f88d50316b4e")), + encryptiontesting.CreateEncryptionKeySecretWithRawKey("kms", nil, 2, []byte("21ea7c91419a68fd1224f88d50316b4e")), + encryptiontesting.CreateMigratedEncryptionKeySecretWithRawKey("kms", []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, 3, []byte("3bc2bdbc2bec2ebce7b27ce792639723"), time.Now()), + }, + []schema.GroupResource{{Group: "", Resource: "configmaps"}, {Group: "", Resource: "secrets"}}, + }, + equalsConfig(&apiserverconfigv1.EncryptionConfiguration{ + Resources: []apiserverconfigv1.ResourceConfiguration{ + { + Resources: []string{"configmaps"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("21ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + { + Resources: []string{"secrets"}, + Providers: []apiserverconfigv1.ProviderConfiguration{{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "3", + Secret: base64.StdEncoding.EncodeToString([]byte("3bc2bdbc2bec2ebce7b27ce792639723")), + }}, + }, + }, { + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{{ + Name: "2", + Secret: base64.StdEncoding.EncodeToString([]byte("21ea7c91419a68fd1224f88d50316b4e")), + }}, + }, + }, { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }}, + }, + }, + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := getDesiredEncryptionState(tt.args.oldEncryptionConfig, tt.args.encryptionSecrets, tt.args.toBeEncryptedGRs) + if tt.validate != nil { + tt.validate(t, &tt.args, got) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/encryption/testing/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/encryption/testing/helpers.go new file mode 100644 index 00000000000..950acc61151 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/encryption/testing/helpers.go @@ -0,0 +1,280 @@ +package testing + +import ( + "encoding/json" + "errors" + "fmt" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1" + clientgotesting "k8s.io/client-go/testing" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/encryption/secrets" + "github.com/openshift/library-go/pkg/operator/encryption/state" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + encryptionSecretKeyDataForTest = "encryption.apiserver.operator.openshift.io-key" + 
encryptionSecretMigratedTimestampForTest = "encryption.apiserver.operator.openshift.io/migrated-timestamp" + encryptionSecretMigratedResourcesForTest = "encryption.apiserver.operator.openshift.io/migrated-resources" +) + +func CreateEncryptionKeySecretNoData(targetNS string, grs []schema.GroupResource, keyID uint64) *corev1.Secret { + return CreateEncryptionKeySecretNoDataWithMode(targetNS, grs, keyID, "aescbc") +} + +func CreateEncryptionKeySecretNoDataWithMode(targetNS string, grs []schema.GroupResource, keyID uint64, mode string) *corev1.Secret { + s := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("encryption-key-%s-%d", targetNS, keyID), + Namespace: "openshift-config-managed", + Annotations: map[string]string{ + state.KubernetesDescriptionKey: state.KubernetesDescriptionScaryValue, + + "encryption.apiserver.operator.openshift.io/mode": mode, + "encryption.apiserver.operator.openshift.io/internal-reason": "", + "encryption.apiserver.operator.openshift.io/external-reason": "", + }, + Labels: map[string]string{ + "encryption.apiserver.operator.openshift.io/component": targetNS, + }, + Finalizers: []string{"encryption.apiserver.operator.openshift.io/deletion-protection"}, + }, + Data: map[string][]byte{}, + } + + if len(grs) > 0 { + migratedResourceBytes, err := json.Marshal(secrets.MigratedGroupResources{Resources: grs}) + if err != nil { + panic(err) + } + s.Annotations[encryptionSecretMigratedResourcesForTest] = string(migratedResourceBytes) + } + + return s +} + +func CreateEncryptionKeySecretWithRawKey(targetNS string, grs []schema.GroupResource, keyID uint64, rawKey []byte) *corev1.Secret { + return CreateEncryptionKeySecretWithRawKeyWithMode(targetNS, grs, keyID, rawKey, "aescbc") +} + +func CreateEncryptionKeySecretWithRawKeyWithMode(targetNS string, grs []schema.GroupResource, keyID uint64, rawKey []byte, mode string) *corev1.Secret { + secret := CreateEncryptionKeySecretNoDataWithMode(targetNS, grs, keyID, mode) + secret.Data[encryptionSecretKeyDataForTest] = rawKey + return secret +} + +func CreateEncryptionKeySecretWithKeyFromExistingSecret(targetNS string, grs []schema.GroupResource, keyID uint64, existingSecret *corev1.Secret) *corev1.Secret { + secret := CreateEncryptionKeySecretNoData(targetNS, grs, keyID) + if rawKey, exist := existingSecret.Data[encryptionSecretKeyDataForTest]; exist { + secret.Data[encryptionSecretKeyDataForTest] = rawKey + } + return secret +} + +func CreateMigratedEncryptionKeySecretWithRawKey(targetNS string, grs []schema.GroupResource, keyID uint64, rawKey []byte, ts time.Time) *corev1.Secret { + secret := CreateEncryptionKeySecretWithRawKey(targetNS, grs, keyID, rawKey) + secret.Annotations[encryptionSecretMigratedTimestampForTest] = ts.Format(time.RFC3339) + return secret +} + +func CreateExpiredMigratedEncryptionKeySecretWithRawKey(targetNS string, grs []schema.GroupResource, keyID uint64, rawKey []byte) *corev1.Secret { + return CreateMigratedEncryptionKeySecretWithRawKey(targetNS, grs, keyID, rawKey, time.Now().Add(-(time.Hour*24*7 + time.Hour))) +} + +func CreateDummyKubeAPIPod(name, namespace string, nodeName string) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + "apiserver": "true", + "revision": "1", + }, + }, + Spec: corev1.PodSpec{ + NodeName: nodeName, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + }, + 
} +} + +func CreateDummyKubeAPIPodInUnknownPhase(name, namespace string, nodeName string) *corev1.Pod { + p := CreateDummyKubeAPIPod(name, namespace, nodeName) + p.Status.Phase = corev1.PodUnknown + return p +} + +func ValidateActionsVerbs(actualActions []clientgotesting.Action, expectedActions []string) error { + if len(actualActions) != len(expectedActions) { + return fmt.Errorf("expected to get %d actions but got %d\nexpected=%v \n got=%v", len(expectedActions), len(actualActions), expectedActions, actionStrings(actualActions)) + } + for i, a := range actualActions { + if got, expected := actionString(a), expectedActions[i]; got != expected { + return fmt.Errorf("at %d got %s, expected %s", i, got, expected) + } + } + return nil +} + +func actionString(a clientgotesting.Action) string { + return a.GetVerb() + ":" + a.GetResource().Resource + ":" + a.GetNamespace() +} + +func actionStrings(actions []clientgotesting.Action) []string { + res := make([]string, 0, len(actions)) + for _, a := range actions { + res = append(res, actionString(a)) + } + return res +} + +func CreateEncryptionCfgNoWriteKey(keyID string, keyBase64 string, resources ...string) *apiserverconfigv1.EncryptionConfiguration { + keysResources := []EncryptionKeysResourceTuple{} + for _, resource := range resources { + keysResources = append(keysResources, EncryptionKeysResourceTuple{ + Resource: resource, + Keys: []apiserverconfigv1.Key{ + {Name: keyID, Secret: keyBase64}, + }, + }) + + } + return CreateEncryptionCfgNoWriteKeyMultipleReadKeys(keysResources) +} + +func CreateEncryptionCfgNoWriteKeyMultipleReadKeys(keysResources []EncryptionKeysResourceTuple) *apiserverconfigv1.EncryptionConfiguration { + ec := &apiserverconfigv1.EncryptionConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "EncryptionConfiguration", + APIVersion: "apiserver.config.k8s.io/v1", + }, + Resources: []apiserverconfigv1.ResourceConfiguration{}, + } + + for _, keysResource := range keysResources { + rc := apiserverconfigv1.ResourceConfiguration{ + Resources: []string{keysResource.Resource}, + Providers: []apiserverconfigv1.ProviderConfiguration{ + { + Identity: &apiserverconfigv1.IdentityConfiguration{}, + }, + }, + } + for i, key := range keysResource.Keys { + desiredMode := "" + if len(keysResource.Modes) == len(keysResource.Keys) { + desiredMode = keysResource.Modes[i] + } + switch desiredMode { + case "aesgcm": + rc.Providers = append(rc.Providers, apiserverconfigv1.ProviderConfiguration{ + AESGCM: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{key}, + }, + }) + default: + rc.Providers = append(rc.Providers, apiserverconfigv1.ProviderConfiguration{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{key}, + }, + }) + } + } + ec.Resources = append(ec.Resources, rc) + } + + return ec +} + +func CreateEncryptionCfgWithWriteKey(keysResources []EncryptionKeysResourceTuple) *apiserverconfigv1.EncryptionConfiguration { + configurations := []apiserverconfigv1.ResourceConfiguration{} + for _, keysResource := range keysResources { + // TODO allow secretbox -> not sure if EncryptionKeysResourceTuple makes sense + providers := []apiserverconfigv1.ProviderConfiguration{} + for _, key := range keysResource.Keys { + providers = append(providers, apiserverconfigv1.ProviderConfiguration{ + AESCBC: &apiserverconfigv1.AESConfiguration{ + Keys: []apiserverconfigv1.Key{key}, + }, + }) + } + providers = append(providers, apiserverconfigv1.ProviderConfiguration{ + Identity: &apiserverconfigv1.IdentityConfiguration{}, + 
}) + + configurations = append(configurations, apiserverconfigv1.ResourceConfiguration{ + Resources: []string{keysResource.Resource}, + Providers: providers, + }) + } + + return &apiserverconfigv1.EncryptionConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "EncryptionConfiguration", + APIVersion: "apiserver.config.k8s.io/v1", + }, + Resources: configurations, + } +} + +type EncryptionKeysResourceTuple struct { + Resource string + Keys []apiserverconfigv1.Key + // an ordered list of encryption modes that matches the keys, + // for example mode[0] matches keys[0] + Modes []string +} + +func ValidateOperatorClientConditions(ts *testing.T, operatorClient v1helpers.OperatorClient, expectedConditions []operatorv1.OperatorCondition) { + ts.Helper() + _, status, _, err := operatorClient.GetOperatorState() + if err != nil { + ts.Fatal(err) + } + + if len(status.Conditions) != len(expectedConditions) { + ts.Fatalf("expected to get %d conditions from operator client but got %d:\n\nexpected=%v\n\ngot=%v", len(expectedConditions), len(status.Conditions), expectedConditions, status.Conditions) + } + + for _, actualCondition := range status.Conditions { + actualConditionValidated := false + for _, expectedCondition := range expectedConditions { + expectedCondition.LastTransitionTime = actualCondition.LastTransitionTime + if equality.Semantic.DeepEqual(expectedCondition, actualCondition) { + actualConditionValidated = true + break + } + } + if !actualConditionValidated { + ts.Fatalf("unexpected condition found %#v", actualCondition) + } + + } +} + +func ValidateEncryptionKey(secret *corev1.Secret) error { + rawKey, exist := secret.Data[encryptionSecretKeyDataForTest] + if !exist { + return errors.New("the secret doesn't contain an encryption key") + } + if len(rawKey) != 32 { + return fmt.Errorf("incorrect length of the encryption key, expected 32, got %d bytes", len(rawKey)) + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS new file mode 100644 index 00000000000..4f189b70875 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - mfojtik + - deads2k + - sttts +approvers: + - mfojtik + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go b/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go new file mode 100644 index 00000000000..83ea0e88d47 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_testing.go @@ -0,0 +1,46 @@ +package eventstesting + +import ( + "fmt" + "testing" + + "github.com/openshift/library-go/pkg/operator/events" +) + +type TestingEventRecorder struct { + t *testing.T + component string +} + +// NewTestingEventRecorder provides an event recorder that logs all recorded events to the test log.
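The expected configurations in the test cases above all encode the same ordering invariant: the intended write key appears as the first provider, older read keys follow, and an identity provider is appended last so still-unencrypted data stays readable during rotation. The CreateEncryptionCfgWithWriteKey helper builds exactly that shape; the sketch below shows how a test might exercise it, reusing key material that appears in the test cases above (the package and test names are illustrative, not part of this change):

package encryption_test

import (
	"encoding/base64"
	"testing"

	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"

	encryptiontesting "github.com/openshift/library-go/pkg/operator/encryption/testing"
)

func TestWriteKeyOrdering(t *testing.T) {
	cfg := encryptiontesting.CreateEncryptionCfgWithWriteKey([]encryptiontesting.EncryptionKeysResourceTuple{{
		Resource: "secrets",
		Keys: []apiserverconfigv1.Key{
			// The intended write key comes first, older read keys follow.
			{Name: "2", Secret: base64.StdEncoding.EncodeToString([]byte("2b234b23cb23c4b2cb24cb24bcbffbca"))},
			{Name: "1", Secret: base64.StdEncoding.EncodeToString([]byte("11ea7c91419a68fd1224f88d50316b4e"))},
		},
	}})

	providers := cfg.Resources[0].Providers
	if providers[0].AESCBC == nil || providers[0].AESCBC.Keys[0].Name != "2" {
		t.Fatalf("expected key 2 to be the write key, got %+v", providers[0])
	}
	// The identity provider is always appended last as the read-only fallback.
	if providers[len(providers)-1].Identity == nil {
		t.Fatal("expected identity provider to be appended last")
	}
}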
+func NewTestingEventRecorder(t *testing.T) events.Recorder { + return &TestingEventRecorder{t: t, component: "test"} +} + +func (r *TestingEventRecorder) ComponentName() string { + return r.component +} + +func (r *TestingEventRecorder) ForComponent(c string) events.Recorder { + return &TestingEventRecorder{t: r.t, component: c} +} + +func (r *TestingEventRecorder) WithComponentSuffix(suffix string) events.Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *TestingEventRecorder) Event(reason, message string) { + r.t.Logf("Event: %v: %v", reason, message) +} + +func (r *TestingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *TestingEventRecorder) Warning(reason, message string) { + r.t.Logf("Warning: %v: %v", reason, message) +} + +func (r *TestingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_wrapper.go b/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_wrapper.go new file mode 100644 index 00000000000..38bc9a523dd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/eventstesting/recorder_wrapper.go @@ -0,0 +1,51 @@ +package eventstesting + +import ( + "testing" + + "github.com/openshift/library-go/pkg/operator/events" +) + +type EventRecorder struct { + realEventRecorder events.Recorder + testingEventRecorder *TestingEventRecorder +} + +func NewEventRecorder(t *testing.T, r events.Recorder) events.Recorder { + return &EventRecorder{ + testingEventRecorder: NewTestingEventRecorder(t).(*TestingEventRecorder), + realEventRecorder: r, + } +} + +func (e *EventRecorder) Event(reason, message string) { + e.realEventRecorder.Event(reason, message) + e.testingEventRecorder.Event(reason, message) +} + +func (e *EventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + e.realEventRecorder.Eventf(reason, messageFmt, args...) + e.testingEventRecorder.Eventf(reason, messageFmt, args...) +} + +func (e *EventRecorder) Warning(reason, message string) { + e.realEventRecorder.Warning(reason, message) + e.testingEventRecorder.Warning(reason, message) +} + +func (e *EventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + e.realEventRecorder.Warningf(reason, messageFmt, args...) + e.testingEventRecorder.Warningf(reason, messageFmt, args...) +} + +func (e *EventRecorder) ForComponent(componentName string) events.Recorder { + return e +} + +func (e *EventRecorder) WithComponentSuffix(componentNameSuffix string) events.Recorder { + return e +} + +func (e *EventRecorder) ComponentName() string { + return "test-recorder" +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go new file mode 100644 index 00000000000..03bceede8f0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -0,0 +1,208 @@ +package events + +import ( + "fmt" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Recorder is a simple event recording interface. 
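The eventstesting wrapper above tees every call to both a real recorder and the t.Logf-backed testing recorder; both sides implement the Recorder interface defined just below. A minimal test wiring sketch, pairing the wrapper with the in-memory recorder added elsewhere in this dependency bump as a stand-in for the real, API-backed recorder (test and component names are illustrative):

package mycontroller

import (
	"testing"

	"github.com/openshift/library-go/pkg/operator/events"
	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
)

func TestControllerEventsAreTeed(t *testing.T) {
	// Stands in for the API-backed recorder a controller would normally get.
	real := events.NewInMemoryRecorder("my-controller")
	rec := eventstesting.NewEventRecorder(t, real)

	rec.Eventf("SecretCreated", "created secret %s/%s", "ns", "name")
	rec.Warning("SyncFailed", "transient failure")

	// Both calls reached the wrapped recorder and were also logged via t.Logf.
	if got := len(real.Events()); got != 2 {
		t.Fatalf("expected 2 events on the real recorder, got %d", got)
	}
}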
+type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows changing the component name before sending the event to the sink. + // Making component names more unique prevents the spam filter in the upstream event sink from dropping + // events. + ForComponent(componentName string) Recorder + + // WithComponentSuffix is similar to ForComponent except it suffixes the current component name instead of overriding it. + WithComponentSuffix(componentNameSuffix string) Recorder + + // ComponentName returns the current source component name for the event. + // This allows suffixing the original component name with a 'sub-component'. + ComponentName() string +} + +// podNameEnv is the name of the environment variable inside the container that specifies the name of the current pod. +// That pod is then resolved to its controller, which is used as the source/involved object for operator events. +const podNameEnv = "POD_NAME" + +// podNameEnvFunc allows overriding the way we get the environment variable value (for unit tests). +var podNameEnvFunc = func() string { + return os.Getenv(podNameEnv) +} + +// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs. +// The pod name must be provided via the POD_NAME environment variable. +// Even if this method returns an error, it always returns a valid reference to the namespace. This allows the callers to control the logging +// and decide whether to fail or accept the namespace. +func GetControllerReferenceForCurrentPod(client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) { + if reference == nil { + // Try to get the pod name via the POD_NAME environment variable + reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace} + if len(reference.Name) != 0 { + return GetControllerReferenceForCurrentPod(client, targetNamespace, reference) + } + // If that fails, let's try to guess the pod by listing all pods in the namespace and using the first pod in the list + reference, err := guessControllerReferenceForNamespace(client.CoreV1().Pods(targetNamespace)) + if err != nil { + // If this fails, do not give up with an error but instead use the namespace as the controller reference for the pod + // NOTE: This is a last resort; if we see this often it might indicate something is wrong in the cluster. + // In some cases this might help with flakes.
+ return getControllerReferenceForNamespace(targetNamespace), err + } + return GetControllerReferenceForCurrentPod(client, targetNamespace, reference) + } + + switch reference.Kind { + case "Pod": + pod, err := client.CoreV1().Pods(reference.Namespace).Get(reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if podController := metav1.GetControllerOf(pod); podController != nil { + return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(podController, targetNamespace)) + } + // This is a bare pod without any ownerReference + return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil + case "ReplicaSet": + rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if rsController := metav1.GetControllerOf(rs); rsController != nil { + return GetControllerReferenceForCurrentPod(client, targetNamespace, makeObjectReference(rsController, targetNamespace)) + } + // This is a replicaSet without any ownerReference + return reference, nil + default: + return reference, nil + } +} + +// getControllerReferenceForNamespace returns an object reference to the given namespace. +func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: "Namespace", + Namespace: targetNamespace, + Name: targetNamespace, + APIVersion: "v1", + } +} + +// makeObjectReference makes object reference from ownerReference and target namespace +func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: owner.Kind, + Namespace: targetNamespace, + Name: owner.Name, + UID: owner.UID, + APIVersion: owner.APIVersion, + } +} + +// guessControllerReferenceForNamespace tries to guess what resource to reference. +func guessControllerReferenceForNamespace(client corev1client.PodInterface) (*corev1.ObjectReference, error) { + pods, err := client.List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("unable to setup event recorder as %q env variable is not set and there are no pods", podNameEnv) + } + + pod := &pods.Items[0] + ownerRef := metav1.GetControllerOf(pod) + return &corev1.ObjectReference{ + Kind: ownerRef.Kind, + Namespace: pod.Namespace, + Name: ownerRef.Name, + UID: ownerRef.UID, + APIVersion: ownerRef.APIVersion, + }, nil +} + +// NewRecorder returns new event recorder. +func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return &recorder{ + eventClient: client, + involvedObjectRef: involvedObjectRef, + sourceComponent: sourceComponentName, + } +} + +// recorder is an implementation of Recorder interface. 
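GetControllerReferenceForCurrentPod above walks ownerReferences upwards (Pod to ReplicaSet to Deployment), degrading to a plain namespace reference when the chain cannot be resolved, and NewRecorder then records against whatever reference came back. A standalone sketch of that flow using a fake clientset (all names are illustrative; with only the pod present, the walk stops at the missing ReplicaSet and falls back to the namespace while still reporting the error):

package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/openshift/library-go/pkg/operator/events"
)

func main() {
	os.Setenv("POD_NAME", "operator-abc") // resolution starts from this pod name

	truePtr := true
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name:      "operator-abc",
		Namespace: "openshift-operator",
		OwnerReferences: []metav1.OwnerReference{{
			APIVersion: "apps/v1",
			Kind:       "ReplicaSet",
			Name:       "operator-rs",
			Controller: &truePtr,
		}},
	}}
	client := fake.NewSimpleClientset(pod)

	// The owning ReplicaSet is not in the fake client, so this returns a
	// Namespace reference together with the lookup error.
	ref, err := events.GetControllerReferenceForCurrentPod(client, "openshift-operator", nil)
	fmt.Println(ref.Kind, ref.Name, err)

	rec := events.NewRecorder(client.CoreV1().Events("openshift-operator"), "my-operator", ref)
	rec.Event("Started", "operator bootstrapped")
}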
+type recorder struct { + eventClient corev1client.EventInterface + involvedObjectRef *corev1.ObjectReference + sourceComponent string +} + +func (r *recorder) ComponentName() string { + return r.sourceComponent +} + +func (r *recorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + newRecorderForComponent.sourceComponent = componentName + return &newRecorderForComponent +} + +func (r *recorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Eventf emits the normal type event and allows formatting of the message. +func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warningf emits the warning type event and allows formatting of the message. +func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *recorder) Event(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) + if _, err := r.eventClient.Create(event); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +// Warning emits the warning type event. +func (r *recorder) Warning(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + if _, err := r.eventClient.Create(event); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + Namespace: involvedObjRef.Namespace, + }, + InvolvedObject: *involvedObjRef, + Reason: reason, + Message: message, + Type: eventType, + Count: 1, + FirstTimestamp: currentTime, + LastTimestamp: currentTime, + } + event.Source.Component = sourceComponent + return event +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go new file mode 100644 index 00000000000..b64d9f6a987 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -0,0 +1,77 @@ +package events + +import ( + "fmt" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog" +) + +type inMemoryEventRecorder struct { + events []*corev1.Event + source string + sync.Mutex +} + +// inMemoryDummyObjectReference is used for fake events. +var inMemoryDummyObjectReference = corev1.ObjectReference{ + Kind: "Pod", + Namespace: "dummy", + Name: "dummy", + APIVersion: "v1", +} + +type InMemoryRecorder interface { + Events() []*corev1.Event + Recorder +} + +// NewInMemoryRecorder provides an event recorder that stores all recorded events in memory and allows replaying them using the Events() method. +// This recorder should only be used in unit tests.
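A unit test can hand this recorder to the code under test and assert on the captured events afterwards; a minimal sketch (package, test name, and reasons are illustrative):

package mycontroller

import (
	"testing"

	"github.com/openshift/library-go/pkg/operator/events"
)

func TestEmitsExpectedEvents(t *testing.T) {
	rec := events.NewInMemoryRecorder("encryption-controller")

	// The code under test would receive rec as its events.Recorder.
	rec.Eventf("KeyCreated", "created key secret %q", "encryption-key-kms-5")

	evs := rec.Events()
	if len(evs) != 1 || evs[0].Reason != "KeyCreated" {
		t.Fatalf("unexpected events: %v", evs)
	}
}

Worth noting: ForComponent on the in-memory implementation mutates the recorder in place and returns the same instance, so a component rename is visible through previously held references, unlike the API-backed recorder above, which returns a copy.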
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder { + return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent} +} + +func (r *inMemoryEventRecorder) ComponentName() string { + return r.source +} + +func (r *inMemoryEventRecorder) ForComponent(component string) Recorder { + r.Lock() + defer r.Unlock() + r.source = component + return r +} + +func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Events returns list of recorded events +func (r *inMemoryEventRecorder) Events() []*corev1.Event { + return r.events +} + +func (r *inMemoryEventRecorder) Event(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *inMemoryEventRecorder) Warning(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) + klog.Info(event.String()) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go new file mode 100644 index 00000000000..7f3b5cd8bd0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go @@ -0,0 +1,49 @@ +package events + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog" +) + +type LoggingEventRecorder struct { + component string +} + +// NewLoggingEventRecorder provides event recorder that will log all recorded events via klog. 
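For processes with no API server to talk to, the klog-backed recorder below offers the same Recorder surface with log-only output; a minimal usage sketch (the component name and reasons are illustrative, e.g. a render or bootstrap command):

package main

import "github.com/openshift/library-go/pkg/operator/events"

func main() {
	// No API client or object reference required: events only reach the
	// process log via klog, nothing is sent to the API server.
	rec := events.NewLoggingEventRecorder("render")
	rec.Eventf("ConfigGenerated", "wrote %d manifests", 12)
	rec.Warning("MissingInput", "no custom CA provided, using defaults")
}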
+func NewLoggingEventRecorder(component string) Recorder { + return &LoggingEventRecorder{component: component} +} + +func (r *LoggingEventRecorder) ComponentName() string { + return r.component +} + +func (r *LoggingEventRecorder) ForComponent(component string) Recorder { + newRecorder := *r + newRecorder.component = component + return &newRecorder +} + +func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *LoggingEventRecorder) Event(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message) + klog.Info(event.String()) +} + +func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *LoggingEventRecorder) Warning(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message) + klog.Warning(event.String()) +} + +func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go new file mode 100644 index 00000000000..29af896d397 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_test.go @@ -0,0 +1,186 @@ +package events + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" +) + +func fakeControllerRef(t *testing.T) *corev1.ObjectReference { + podNameEnvFunc = func() string { + return "test" + } + client := fake.NewSimpleClientset(fakePod("test-namespace", "test"), fakeReplicaSet("test-namespace", "test")) + + ref, err := GetControllerReferenceForCurrentPod(client, "test-namespace", nil) + if err != nil { + t.Fatalf("unable to get object reference: %v", err) + } + return ref +} + +func fakePod(namespace, name string) *corev1.Pod { + pod := &corev1.Pod{} + pod.Name = name + pod.Namespace = namespace + truePtr := true + pod.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "ReplicaSet", + Name: "test", + UID: "05022234-d394-11e8-8169-42010a8e0003", + Controller: &truePtr, + BlockOwnerDeletion: &truePtr, + }, + }) + return pod +} + +func fakeReplicaSet(namespace, name string) *appsv1.ReplicaSet { + rs := &appsv1.ReplicaSet{} + rs.Name = name + rs.Namespace = namespace + truePtr := true + rs.SetOwnerReferences([]metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test", + UID: "15022234-d394-11e8-8169-42010a8e0003", + Controller: &truePtr, + BlockOwnerDeletion: &truePtr, + }, + }) + return rs +} + +func TestRecorder(t *testing.T) { + client := fake.NewSimpleClientset() + r := NewRecorder(client.CoreV1().Events("test-namespace"), "test-operator", fakeControllerRef(t)) + + r.Event("TestReason", "foo") + + var createdEvent *corev1.Event + + for _, action := range client.Actions() { + if action.Matches("create", "events") { + createAction := action.(clientgotesting.CreateAction) + createdEvent = createAction.GetObject().(*corev1.Event) + break + } + } + if createdEvent == nil { + t.Fatalf("expected event to be created") + } + if createdEvent.InvolvedObject.Kind != 
"Deployment" { + t.Errorf("expected involved object kind Deployment, got: %q", createdEvent.InvolvedObject.Kind) + } + if createdEvent.InvolvedObject.Namespace != "test-namespace" { + t.Errorf("expected involved object namespace test-namespace, got: %q", createdEvent.InvolvedObject.Namespace) + } + if createdEvent.Reason != "TestReason" { + t.Errorf("expected event to have TestReason, got %q", createdEvent.Reason) + } + if createdEvent.Message != "foo" { + t.Errorf("expected message to be foo, got %q", createdEvent.Message) + } + if createdEvent.Type != "Normal" { + t.Errorf("expected event type to be Normal, got %q", createdEvent.Type) + } + if createdEvent.Source.Component != "test-operator" { + t.Errorf("expected event source to be test-operator, got %q", createdEvent.Source.Component) + } +} + +func TestGetControllerReferenceForCurrentPodIsPod(t *testing.T) { + pod := fakePod("test", "test") + pod.OwnerReferences = []metav1.OwnerReference{} + client := fake.NewSimpleClientset(pod) + + podNameEnvFunc = func() string { + return "test" + } + + objectReference, err := GetControllerReferenceForCurrentPod(client, "test", nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if objectReference.Name != "test" { + t.Errorf("expected objectReference name to be 'test', got %q", objectReference.Name) + } + + if objectReference.GroupVersionKind().String() != "/v1, Kind=Pod" { + t.Errorf("expected objectReference to be Pod, got %q", objectReference.GroupVersionKind().String()) + } +} + +func TestGetControllerReferenceForCurrentPodIsReplicaSet(t *testing.T) { + rs := fakeReplicaSet("test", "test") + rs.OwnerReferences = []metav1.OwnerReference{} + client := fake.NewSimpleClientset(fakePod("test", "test"), rs) + + podNameEnvFunc = func() string { + return "test" + } + + objectReference, err := GetControllerReferenceForCurrentPod(client, "test", nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if objectReference.Name != "test" { + t.Errorf("expected objectReference name to be 'test', got %q", objectReference.Name) + } + + if objectReference.GroupVersionKind().String() != "apps/v1, Kind=ReplicaSet" { + t.Errorf("expected objectReference to be ReplicaSet, got %q", objectReference.GroupVersionKind().String()) + } +} + +func TestGetControllerReferenceForCurrentPod(t *testing.T) { + client := fake.NewSimpleClientset(fakePod("test", "test"), fakeReplicaSet("test", "test")) + + podNameEnvFunc = func() string { + return "test" + } + + objectReference, err := GetControllerReferenceForCurrentPod(client, "test", nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if objectReference.Name != "test" { + t.Errorf("expected objectReference name to be 'test', got %q", objectReference.Name) + } + + if objectReference.GroupVersionKind().String() != "apps/v1, Kind=Deployment" { + t.Errorf("expected objectReference to be Deployment, got %q", objectReference.GroupVersionKind().String()) + } +} + +func TestGetControllerReferenceForCurrentPodFallbackNamespace(t *testing.T) { + client := fake.NewSimpleClientset() + + podNameEnvFunc = func() string { + return "test" + } + + objectReference, err := GetControllerReferenceForCurrentPod(client, "test", nil) + if err == nil { + t.Fatalf("expected error: %v", err) + } + + if objectReference.Name != "test" { + t.Errorf("expected objectReference name to be 'test', got %q", objectReference.Name) + } + + if objectReference.GroupVersionKind().String() != "/v1, Kind=Namespace" { + t.Errorf("expected objectReference to be Namespace, 
got %q", objectReference.GroupVersionKind().String()) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go new file mode 100644 index 00000000000..359d2eb81ea --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go @@ -0,0 +1,70 @@ +package events + +import ( + "fmt" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" +) + +// NewKubeRecorder returns new event recorder. +func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return (&upstreamRecorder{ + client: client, + component: sourceComponentName, + involvedObjectRef: involvedObjectRef, + }).ForComponent(sourceComponentName) +} + +// upstreamRecorder is an implementation of Recorder interface. +type upstreamRecorder struct { + client corev1client.EventInterface + component string + broadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + involvedObjectRef *corev1.ObjectReference +} + +func (r *upstreamRecorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + broadcaster := record.NewBroadcaster() + broadcaster.StartLogging(klog.Infof) + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client}) + + newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName}) + newRecorderForComponent.component = componentName + + return &newRecorderForComponent +} + +func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *upstreamRecorder) ComponentName() string { + return r.component +} + +// Eventf emits the normal type event and allow formatting of message. +func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warningf emits the warning type event and allow formatting of message. +func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *upstreamRecorder) Event(reason, message string) { + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message) +} + +// Warning emits the warning type event. 
+func (r *upstreamRecorder) Warning(reason, message string) { + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go new file mode 100644 index 00000000000..e93572cdc1d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go @@ -0,0 +1,225 @@ +package genericoperatorclient + +import ( + "reflect" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/v1helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const globalConfigName = "cluster" + +func NewClusterScopedOperatorClient(config *rest.Config, gvr schema.GroupVersionResource) (v1helpers.OperatorClient, dynamicinformer.DynamicSharedInformerFactory, error) { + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, nil, err + } + client := dynamicClient.Resource(gvr) + + informers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 12*time.Hour) + informer := informers.ForResource(gvr) + + return &dynamicOperatorClient{ + informer: informer, + client: client, + }, informers, nil +} + +type dynamicOperatorClient struct { + informer informers.GenericInformer + client dynamic.ResourceInterface +} + +func (c dynamicOperatorClient) Informer() cache.SharedIndexInformer { + return c.informer.Informer() +} + +func (c dynamicOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + uncastInstance, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, nil, "", err + } + instance := uncastInstance.(*unstructured.Unstructured) + + spec, err := getOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +// UpdateOperatorSpec overwrites the operator object spec with the values given +// in operatorv1.OperatorSpec while preserving pre-existing spec fields that have +// no correspondence in operatorv1.OperatorSpec. 
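The getters and setters in this file round-trip between unstructured content and the typed operator structs via runtime.DefaultUnstructuredConverter; unknown fields are dropped by the converter on the read path, which is exactly why the setters below copy pre-existing fields back before writing. The read path, sketched standalone (field values are illustrative):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	obj := map[string]interface{}{
		"spec": map[string]interface{}{
			"managementState": "Managed",
			"logLevel":        "Debug",
			"unknownField":    "ignored by the converter, preserved by the setters",
		},
	}

	uncastSpec, found, err := unstructured.NestedMap(obj, "spec")
	if err != nil || !found {
		panic(fmt.Sprintf("no spec: %v", err))
	}

	spec := &operatorv1.OperatorSpec{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, spec); err != nil {
		panic(err)
	}
	fmt.Println(spec.ManagementState, spec.LogLevel) // Managed Debug
}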
+func (c dynamicOperatorClient) UpdateOperatorSpec(resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { + uncastOriginal, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, "", err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setOperatorSpecFromUnstructured(copy.UnstructuredContent(), spec); err != nil { + return nil, "", err + } + + ret, err := c.client.Update(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, "", err + } + retSpec, err := getOperatorSpecFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, "", err + } + + return retSpec, ret.GetResourceVersion(), nil +} + +// UpdateOperatorStatus overwrites the operator object status with the values given +// in operatorv1.OperatorStatus while preserving pre-existing status fields that have +// no correspondence in operatorv1.OperatorStatus. +func (c dynamicOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + uncastOriginal, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setOperatorStatusFromUnstructured(copy.UnstructuredContent(), status); err != nil { + return nil, err + } + + ret, err := c.client.UpdateStatus(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + retStatus, err := getOperatorStatusFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, err + } + + return retStatus, nil +} + +func getOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.OperatorSpec, error) { + uncastSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return &operatorv1.OperatorSpec{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.OperatorSpec{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.OperatorSpec) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, + // like say a static pod operator spec when cast as an operator spec + newSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + origSpec, preExistingSpec, err := unstructured.NestedMap(obj, "spec") + if err != nil { + return err + } + if preExistingSpec { + flds := topLevelFields(*spec) + for k, v := range origSpec { + if !flds[k] { + if err := unstructured.SetNestedField(newSpec, v, k); err != nil { + return err + } + } + } + } + return unstructured.SetNestedMap(obj, newSpec, "spec") +} + +func getOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1.OperatorStatus, error) { + uncastStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return &operatorv1.OperatorStatus{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.OperatorStatus{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastStatus, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setOperatorStatusFromUnstructured(obj map[string]interface{}, status *operatorv1.OperatorStatus) error { + // 
we cannot simply set the entire map because doing so would stomp unknown fields, + // like say a static pod operator status when cast as an operator status + newStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(status) + if err != nil { + return err + } + + origStatus, preExistingStatus, err := unstructured.NestedMap(obj, "status") + if err != nil { + return err + } + if preExistingStatus { + flds := topLevelFields(*status) + for k, v := range origStatus { + if !flds[k] { + if err := unstructured.SetNestedField(newStatus, v, k); err != nil { + return err + } + } + } + } + return unstructured.SetNestedMap(obj, newStatus, "status") +} + +func topLevelFields(obj interface{}) map[string]bool { + ret := map[string]bool{} + t := reflect.TypeOf(obj) + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + fieldName := fld.Name + if jsonTag := fld.Tag.Get("json"); jsonTag == "-" { + continue + } else if jsonTag != "" { + // check for possible comma as in "...,omitempty" + var commaIdx int + if commaIdx = strings.Index(jsonTag, ","); commaIdx < 0 { + commaIdx = len(jsonTag) + } + fieldName = jsonTag[:commaIdx] + } + ret[fieldName] = true + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go new file mode 100644 index 00000000000..df255057347 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go @@ -0,0 +1,221 @@ +package genericoperatorclient + +import ( + "reflect" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/diff" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +func TestSetOperatorSpecFromUnstructured(t *testing.T) { + tests := []struct { + name string + + in map[string]interface{} + spec *operatorv1.OperatorSpec + expected map[string]interface{} + }{ + { + name: "keep-unknown", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "non-standard-field": "value", + }, + }, + spec: &operatorv1.OperatorSpec{ + LogLevel: operatorv1.Trace, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "non-standard-field": "value", + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "keep-everything-outside-of-spec", + in: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "status": map[string]interface{}{"foo": "bar"}, + "spec": map[string]interface{}{}, + }, + spec: &operatorv1.OperatorSpec{}, + expected: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "status": map[string]interface{}{"foo": "bar"}, + "spec": map[string]interface{}{ + "logLevel": "", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "replace-rawextensions", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "unsupportedConfigOverrides": map[string]interface{}{"foo": "bar"}, + }, + }, + spec: &operatorv1.OperatorSpec{ + LogLevel: operatorv1.Trace, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": nil, + }, + }, + }, + { + name: "remove-observed-fields", + in: map[string]interface{}{ + "spec": 
map[string]interface{}{ + "observedConfig": map[string]interface{}{"a": "1", "b": "2"}, + }, + }, + spec: &operatorv1.OperatorSpec{ + ObservedConfig: runtime.RawExtension{Raw: []byte(`{"a":1}`)}, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "logLevel": "", + "managementState": "", + "operatorLogLevel": "", + "unsupportedConfigOverrides": nil, + "observedConfig": map[string]interface{}{"a": int64(1)}, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setOperatorSpecFromUnstructured(test.in, test.spec) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.in, test.expected) { + t.Errorf(diff.ObjectDiff(test.in, test.expected)) + } + }) + } +} + +func TestSetOperatorStatusFromUnstructured(t *testing.T) { + tests := []struct { + name string + + in map[string]interface{} + status *operatorv1.OperatorStatus + expected map[string]interface{} + }{ + { + name: "keep-unknown", + in: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + }, + }, + status: &operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "Degraded", + }, + }, + }, + expected: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "Degraded", + }, + }, + "readyReplicas": int64(0), + }, + }, + }, + { + name: "keep-everything-outside-of-status", + in: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "spec": map[string]interface{}{"foo": "bar"}, + "status": map[string]interface{}{}, + }, + status: &operatorv1.OperatorStatus{}, + expected: map[string]interface{}{ + "kind": "Foo", + "apiVersion": "bar/v1", + "spec": map[string]interface{}{"foo": "bar"}, + "status": map[string]interface{}{ + "readyReplicas": int64(0), + }, + }, + }, + { + name: "replace-condition", + in: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "overwriteme", + }, + }, + }, + }, + status: &operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "Degraded", + }, + }, + }, + expected: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "Degraded", + }, + }, + "readyReplicas": int64(0), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setOperatorStatusFromUnstructured(test.in, test.status) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.in, test.expected) { + t.Errorf(diff.ObjectGoPrintDiff(test.in, test.expected)) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go new file mode 100644 index 00000000000..35ae57d14d7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go @@ -0,0 +1,201 @@ +package genericoperatorclient + +import ( + "time" + + "github.com/imdario/mergo" + + "k8s.io/apimachinery/pkg/runtime" + + operatorv1 "github.com/openshift/api/operator/v1" + 
"github.com/openshift/library-go/pkg/operator/v1helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/rest" +) + +func NewStaticPodOperatorClient(config *rest.Config, gvr schema.GroupVersionResource) (v1helpers.StaticPodOperatorClient, dynamicinformer.DynamicSharedInformerFactory, error) { + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, nil, err + } + client := dynamicClient.Resource(gvr) + + informers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 12*time.Hour) + informer := informers.ForResource(gvr) + + return &dynamicStaticPodOperatorClient{ + dynamicOperatorClient: dynamicOperatorClient{ + informer: informer, + client: client, + }, + }, informers, nil +} + +type dynamicStaticPodOperatorClient struct { + dynamicOperatorClient +} + +func (c dynamicStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + uncastInstance, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, nil, "", err + } + instance := uncastInstance.(*unstructured.Unstructured) + + spec, err := getStaticPodOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getStaticPodOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + instance, err := c.client.Get("cluster", metav1.GetOptions{}) + if err != nil { + return nil, nil, "", err + } + + spec, err := getStaticPodOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getStaticPodOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + uncastOriginal, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, "", err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setStaticPodOperatorSpecFromUnstructured(copy.UnstructuredContent(), spec); err != nil { + return nil, "", err + } + + ret, err := c.client.Update(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, "", err + } + retSpec, err := getStaticPodOperatorSpecFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, "", err + } + + return retSpec, ret.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + uncastOriginal, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + 
if err := setStaticPodOperatorStatusFromUnstructured(copy.UnstructuredContent(), status); err != nil {
+		return nil, err
+	}
+
+	ret, err := c.client.UpdateStatus(copy, metav1.UpdateOptions{})
+	if err != nil {
+		return nil, err
+	}
+	retStatus, err := getStaticPodOperatorStatusFromUnstructured(ret.UnstructuredContent())
+	if err != nil {
+		return nil, err
+	}
+
+	return retStatus, nil
+}
+
+func getStaticPodOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.StaticPodOperatorSpec, error) {
+	uncastSpec, exists, err := unstructured.NestedMap(obj, "spec")
+	if !exists {
+		return &operatorv1.StaticPodOperatorSpec{}, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	ret := &operatorv1.StaticPodOperatorSpec{}
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+func setStaticPodOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.StaticPodOperatorSpec) error {
+	// we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec
+	newUnstructuredSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec)
+	if err != nil {
+		return err
+	}
+
+	originalUnstructuredSpec, exists, err := unstructured.NestedMap(obj, "spec")
+	if !exists {
+		return unstructured.SetNestedMap(obj, newUnstructuredSpec, "spec")
+	}
+	if err != nil {
+		return err
+	}
+	if err := mergo.Merge(&originalUnstructuredSpec, newUnstructuredSpec, mergo.WithOverride); err != nil {
+		return err
+	}
+
+	return unstructured.SetNestedMap(obj, originalUnstructuredSpec, "spec")
+}
+
+func getStaticPodOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1.StaticPodOperatorStatus, error) {
+	uncastStatus, exists, err := unstructured.NestedMap(obj, "status")
+	if !exists {
+		return &operatorv1.StaticPodOperatorStatus{}, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	ret := &operatorv1.StaticPodOperatorStatus{}
+	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastStatus, ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+func setStaticPodOperatorStatusFromUnstructured(obj map[string]interface{}, status *operatorv1.StaticPodOperatorStatus) error {
+	// we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator status when cast as an operator status
+	newUnstructuredStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(status)
+	if err != nil {
+		return err
+	}
+
+	originalUnstructuredStatus, exists, err := unstructured.NestedMap(obj, "status")
+	if !exists {
+		return unstructured.SetNestedMap(obj, newUnstructuredStatus, "status")
+	}
+	if err != nil {
+		return err
+	}
+	if err := mergo.Merge(&originalUnstructuredStatus, newUnstructuredStatus, mergo.WithOverride); err != nil {
+		return err
+	}
+
+	return unstructured.SetNestedMap(obj, originalUnstructuredStatus, "status")
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go
new file mode 100644
index 00000000000..74f69e92918
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go
@@ -0,0 +1,126 @@
+package loglevel
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/cache"
+	
"k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var workQueueKey = "instance" + +type LogLevelController struct { + operatorClient operatorv1helpers.OperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// sets the klog level based on desired state +func NewClusterOperatorLoggingController( + operatorClient operatorv1helpers.OperatorClient, + recorder events.Recorder, +) *LogLevelController { + c := &LogLevelController{ + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("loglevel-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "LoggingSyncer"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. +func (c LogLevelController) sync() error { + detailedSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + + currentLogLevel := CurrentLogLevel() + desiredLogLevel := detailedSpec.OperatorLogLevel + + if len(desiredLogLevel) == 0 { + desiredLogLevel = operatorv1.Normal + } + + // When the current loglevel is the desired one, do nothing + if currentLogLevel == desiredLogLevel { + return nil + } + + // Set the new loglevel if the operator spec changed + if err := SetVerbosityValue(desiredLogLevel); err != nil { + c.eventRecorder.Warningf("OperatorLoglevelChangeFailed", "Unable to change operator log level from %q to %q: %v", currentLogLevel, desiredLogLevel, err) + return err + } + + c.eventRecorder.Eventf("OperatorLoglevelChange", "Operator log level changed from %q to %q", currentLogLevel, desiredLogLevel) + return nil +} + +func (c *LogLevelController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting LogLevelController") + defer klog.Infof("Shutting down LogLevelController") + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *LogLevelController) runWorker(ctx context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *LogLevelController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and loglevel +func (c *LogLevelController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go new file mode 100644 index 00000000000..ff21148d477 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go @@ -0,0 +1,68 @@ +package loglevel + +import ( + "testing" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func TestClusterOperatorLoggingController(t *testing.T) { + if err := SetVerbosityValue(operatorv1.Normal); err != nil { + t.Fatal(err) + } + + operatorSpec := &operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + } + + fakeStaticPodOperatorClient := v1helpers.NewFakeOperatorClient( + operatorSpec, + &operatorv1.OperatorStatus{}, + nil, + ) + + recorder := events.NewInMemoryRecorder("") + + controller := NewClusterOperatorLoggingController(fakeStaticPodOperatorClient, recorder) + + // no-op, desired == current + // When OperatorLogLevel is "" we assume the loglevel is Normal. 
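+	// so the first sync below must change nothing and record no events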
+	if err := controller.sync(); err != nil {
+		t.Fatal(err)
+	}
+
+	if len(recorder.Events()) > 0 {
+		t.Fatalf("expected zero events, got %d", len(recorder.Events()))
+	}
+
+	// changing the log level to Trace should emit 1 event
+	operatorSpec.OperatorLogLevel = operatorv1.Trace
+	if err := controller.sync(); err != nil {
+		t.Fatal(err)
+	}
+
+	if operatorEvents := recorder.Events(); len(operatorEvents) == 1 {
+		expectedEventMessage := `Operator log level changed from "Normal" to "Trace"`
+		if message := operatorEvents[0].Message; message != expectedEventMessage {
+			t.Fatalf("expected event message %q, got %q", expectedEventMessage, message)
+		}
+	} else {
+		t.Fatalf("expected 1 event, got %d", len(operatorEvents))
+	}
+
+	// next sync should not produce any extra event
+	if err := controller.sync(); err != nil {
+		t.Fatal(err)
+	}
+
+	if operatorEvents := recorder.Events(); len(operatorEvents) != 1 {
+		t.Fatalf("expected 1 event recorded, got %d", len(operatorEvents))
+	}
+
+	if current := CurrentLogLevel(); current != operatorv1.Trace {
+		t.Fatalf("expected log level 'Trace', got %v", current)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go
new file mode 100644
index 00000000000..91e4251f06e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go
@@ -0,0 +1,89 @@
+package loglevel
+
+import (
+	"flag"
+	"fmt"
+
+	"k8s.io/klog"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// LogLevelToKlog transforms operator log level to a klog numeric verbosity level.
+func LogLevelToKlog(logLevel operatorv1.LogLevel) int {
+	switch logLevel {
+	case operatorv1.Normal:
+		return 2
+	case operatorv1.Debug:
+		return 4
+	case operatorv1.Trace:
+		return 6
+	case operatorv1.TraceAll:
+		return 8
+	default:
+		return 2
+	}
+}
+
+// CurrentLogLevel attempts to guess the current log level that is used by klog.
+// We could use flags here as well, but this is less ugly and more programmatically correct than flags.
+func CurrentLogLevel() operatorv1.LogLevel {
+	switch {
+	case klog.V(8) == true:
+		return operatorv1.TraceAll
+	case klog.V(6) == true:
+		return operatorv1.Trace
+	case klog.V(4) == true:
+		return operatorv1.Debug
+	case klog.V(2) == true:
+		return operatorv1.Normal
+	default:
+		return operatorv1.Normal
+	}
+}
+
+// SetVerbosityValue is a nasty hack and an attempt to manipulate the global flags, as klog does not expose
+// a way to dynamically change the loglevel at runtime.
+func SetVerbosityValue(logLevel operatorv1.LogLevel) error {
+	if logLevel == CurrentLogLevel() {
+		return nil
+	}
+
+	var level *klog.Level
+
+	// Convert operator loglevel to klog numeric string
+	desiredLevelValue := fmt.Sprintf("%d", LogLevelToKlog(logLevel))
+
+	// First, if the '-v' was specified on the command line, attempt to acquire the level pointer from it.
+	if f := flag.CommandLine.Lookup("v"); f != nil {
+		if flagValue, ok := f.Value.(*klog.Level); ok {
+			level = flagValue
+		}
+	}
+
+	// Second, if the '-v' was not set but is still present in flags defined for the command, attempt to acquire it
+	// by visiting all flags.
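+	// (any registered flag whose value is a *klog.Level is treated as the verbosity flag)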
+	if level == nil {
+		flag.VisitAll(func(f *flag.Flag) {
+			if level != nil {
+				return
+			}
+			if levelFlag, ok := f.Value.(*klog.Level); ok {
+				level = levelFlag
+			}
+		})
+	}
+
+	if level != nil {
+		return level.Set(desiredLevelValue)
+	}
+
+	// Third, if modifying the flag value (which is recommended by klog) fails, then fall back to modifying
+	// the internal state of klog using the empty new level.
+	var newLevel klog.Level
+	if err := newLevel.Set(desiredLevelValue); err != nil {
+		return fmt.Errorf("failed to set klog.logging.verbosity %s: %v", desiredLevelValue, err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
new file mode 100644
index 00000000000..78acc00d5c4
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go
@@ -0,0 +1,69 @@
+package management
+
+import (
+	"github.com/openshift/api/operator/v1"
+)
+
+var (
+	allowOperatorUnmanagedState = true
+	allowOperatorRemovedState   = true
+)
+
+// These are for unit testing
+var (
+	getAllowedOperatorUnmanaged = func() bool {
+		return allowOperatorUnmanagedState
+	}
+	getAllowedOperatorRemovedState = func() bool {
+		return allowOperatorRemovedState
+	}
+)
+
+// SetOperatorAlwaysManaged is a one-time choice an operator makes to opt out of supporting the "unmanaged" state.
+// This is the case for control plane operators or operators that are required to always run; otherwise the cluster
+// would become unstable or critical components would stop working.
+func SetOperatorAlwaysManaged() {
+	allowOperatorUnmanagedState = false
+}
+
+// SetOperatorNotRemovable is a one-time choice the operator author can make to indicate the operator does not support
+// removal of its operand. This makes sense for operators like kube-apiserver, where removing the operand would lead to a
+// bricked, non-automatically-recoverable state.
+func SetOperatorNotRemovable() {
+	allowOperatorRemovedState = false
+}
+
+// IsOperatorAlwaysManaged means the operator can't be set to unmanaged state.
+func IsOperatorAlwaysManaged() bool {
+	return !getAllowedOperatorUnmanaged()
+}
+
+// IsOperatorNotRemovable means the operator can't be set to removed state.
+func IsOperatorNotRemovable() bool {
+	return !getAllowedOperatorRemovedState()
+}
+
+func IsOperatorUnknownState(state v1.ManagementState) bool {
+	switch state {
+	case v1.Managed, v1.Removed, v1.Unmanaged:
+		return false
+	default:
+		return true
+	}
+}
+
+// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand.
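+// Operators that opted out of the Unmanaged or Removed states are always treated as managed here, and unknown states default to managed.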
+func IsOperatorManaged(state v1.ManagementState) bool {
+	if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() {
+		return true
+	}
+	switch state {
+	case v1.Managed:
+		return true
+	case v1.Removed:
+		return false
+	case v1.Unmanaged:
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go
new file mode 100644
index 00000000000..10bbec3f0d7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go
@@ -0,0 +1,145 @@
+package management
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"k8s.io/klog"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+	operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+var workQueueKey = "instance"
+
+// ManagementStateController watches the `managementState` field and reacts when that field is set to an unsupported value.
+// As each operator can opt out of supporting the `unmanaged` or `removed` states, this controller sets a failing condition
+// when the field is set to one of those values for such operators.
+type ManagementStateController struct {
+	operatorName   string
+	operatorClient operatorv1helpers.OperatorClient
+
+	cachesToSync  []cache.InformerSynced
+	queue         workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+}
+
+func NewOperatorManagementStateController(
+	name string,
+	operatorClient operatorv1helpers.OperatorClient,
+	recorder events.Recorder,
+) *ManagementStateController {
+	c := &ManagementStateController{
+		operatorName:   name,
+		operatorClient: operatorClient,
+		eventRecorder:  recorder,
+
+		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ManagementStateController_"+strings.Replace(name, "-", "_", -1)),
+	}
+
+	operatorClient.Informer().AddEventHandler(c.eventHandler())
+
+	c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced)
+
+	return c
+}
+
+func (c ManagementStateController) sync() error {
+	detailedSpec, _, _, err := c.operatorClient.GetOperatorState()
+	if apierrors.IsNotFound(err) {
+		c.eventRecorder.Warningf("StatusNotFound", "Unable to determine current operator status for %s", c.operatorName)
+		return nil
+	}
+
+	cond := operatorv1.OperatorCondition{
+		Type:   condition.ManagementStateDegradedConditionType,
+		Status: operatorv1.ConditionFalse,
+	}
+
+	if IsOperatorAlwaysManaged() && detailedSpec.ManagementState == operatorv1.Unmanaged {
+		cond.Status = operatorv1.ConditionTrue
+		cond.Reason = "Unmanaged"
+		cond.Message = fmt.Sprintf("Unmanaged is not supported for %s operator", c.operatorName)
+	}
+
+	if IsOperatorNotRemovable() && detailedSpec.ManagementState == operatorv1.Removed {
+		cond.Status = operatorv1.ConditionTrue
+		cond.Reason = "Removed"
+		cond.Message = fmt.Sprintf("Removed is not supported for %s operator", c.operatorName)
+	}
+
+	if IsOperatorUnknownState(detailedSpec.ManagementState) {
+		cond.Status = operatorv1.ConditionTrue
+		cond.Reason = "Unknown"
+		cond.Message 
= fmt.Sprintf("Unsupported management state %q for %s operator", detailedSpec.ManagementState, c.operatorName) + } + + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return nil +} + +func (c *ManagementStateController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting management-state-controller-" + c.operatorName) + defer klog.Infof("Shutting down management-state-controller-" + c.operatorName) + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *ManagementStateController) runWorker(_ context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *ManagementStateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *ManagementStateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go new file mode 100644 index 00000000000..48d3ce6264e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller_test.go @@ -0,0 +1,128 @@ +package management + +import ( + "testing" + + operatorv1 "github.com/openshift/api/operator/v1" + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/events" +) + +func TestOperatorManagementStateController(t *testing.T) { + testCases := []struct { + name string + initialConditions []operatorv1.OperatorCondition + managementState string + allowUnmanaged func() bool + allowRemove func() bool + + expectedFailingStatus bool + expectedMessage string + }{ + { + name: "operator in managed state with no restrictions", + managementState: string(operatorv1.Managed), + allowRemove: func() bool { return true }, + allowUnmanaged: func() bool { return true }, + }, + { + name: "operator in unmanaged state with no restrictions", + managementState: string(operatorv1.Unmanaged), + allowRemove: func() bool { return true }, + allowUnmanaged: func() bool { return true }, + }, + { + name: "operator in unknown state with no restrictions", + managementState: string("UnknownState"), + expectedFailingStatus: true, + expectedMessage: `Unsupported management state "UnknownState" for OPERATOR_NAME operator`, + allowRemove: func() bool { return true }, + allowUnmanaged: func() bool { return true }, + }, + { + name: "operator in unmanaged state with unmanaged not allowed", + managementState: string(operatorv1.Unmanaged), + expectedFailingStatus: true, + expectedMessage: `Unmanaged is not supported for OPERATOR_NAME operator`, + allowRemove: func() 
bool { return true },
+			allowUnmanaged:        func() bool { return false },
+		},
+		{
+			name:                  "operator in removed state with removed not allowed",
+			managementState:       string(operatorv1.Removed),
+			expectedFailingStatus: true,
+			expectedMessage:       `Removed is not supported for OPERATOR_NAME operator`,
+			allowRemove:           func() bool { return false },
+			allowUnmanaged:        func() bool { return false },
+		},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			getAllowedOperatorRemovedState = tc.allowRemove
+			getAllowedOperatorUnmanaged = tc.allowUnmanaged
+
+			statusClient := &statusClient{
+				t: t,
+				spec: operatorv1.OperatorSpec{
+					ManagementState: operatorv1.ManagementState(tc.managementState),
+				},
+				status: operatorv1.OperatorStatus{
+					Conditions: tc.initialConditions,
+				},
+			}
+			controller := &ManagementStateController{
+				operatorName:   "OPERATOR_NAME",
+				operatorClient: statusClient,
+				eventRecorder:  events.NewInMemoryRecorder("status"),
+			}
+			if err := controller.sync(); err != nil {
+				t.Errorf("unexpected sync error: %v", err)
+				return
+			}
+
+			_, result, _, _ := statusClient.GetOperatorState()
+
+			if tc.expectedFailingStatus && result.Conditions[0].Type == "ManagementStateDegraded" && result.Conditions[0].Status == operatorv1.ConditionFalse {
+				t.Errorf("expected failing conditions")
+				return
+			}
+
+			if !tc.expectedFailingStatus && result.Conditions[0].Type == "ManagementStateDegraded" && result.Conditions[0].Status != operatorv1.ConditionFalse {
+				t.Errorf("unexpected failing conditions: %#v", result.Conditions)
+				return
+			}
+
+			if tc.expectedFailingStatus {
+				if result.Conditions[0].Message != tc.expectedMessage {
+					t.Errorf("expected message %q, got %q", tc.expectedMessage, result.Conditions[0].Message)
+				}
+			}
+		})
+	}
+}
+
+// statusClient is a fake operator client (an OperatorStatusProvider) used to capture status updates in tests.
+type statusClient struct {
+	t      *testing.T
+	spec   operatorv1.OperatorSpec
+	status operatorv1.OperatorStatus
+}
+
+func (c *statusClient) Informer() cache.SharedIndexInformer {
+	c.t.Log("Informer called")
+	return nil
+}
+
+func (c *statusClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) {
+	return &c.spec, &c.status, "", nil
+}
+
+func (c *statusClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) {
+	panic("missing")
+}
+
+func (c *statusClient) UpdateOperatorStatus(version string, s *operatorv1.OperatorStatus) (status *operatorv1.OperatorStatus, err error) {
+	c.status = *s
+	return &c.status, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go
new file mode 100644
index 00000000000..019b61ce482
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go
@@ -0,0 +1,45 @@
+package options
+
+// ManifestConfig is a struct of values to be used in manifest templates.
+type ManifestConfig struct {
+	// ConfigHostPath is a host path mounted into the controller manager pods to hold the config file.
+	ConfigHostPath string
+
+	// ConfigFileName is the filename of the config file inside ConfigHostPath.
+	ConfigFileName string
+
+	// CloudProviderHostPath is a host path mounted into the apiserver pods to hold cloud provider configuration.
+	CloudProviderHostPath string
+
+	// SecretsHostPath is a host path mounted into the pods to hold certs and keys.
+	SecretsHostPath string
+
+	// Namespace is the target namespace for the bootstrap controller manager to be created.
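+	// NewManifestOptions defaults this to "openshift-<component>".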
+	Namespace string
+
+	// Image is the pull spec of the image to use for the controller manager.
+	Image string
+
+	// OperatorImage is the pull spec of the image to use for the operator (optional).
+	OperatorImage string
+
+	// ImagePullPolicy specifies the image pull policy to use for the images.
+	ImagePullPolicy string
+}
+
+// FileConfig holds the rendered config files and the loaded assets used during rendering.
+type FileConfig struct {
+	// BootstrapConfig holds the rendered control plane component config file for bootstrapping (phase 1).
+	BootstrapConfig []byte
+
+	// PostBootstrapConfig holds the rendered control plane component config file after bootstrapping (phase 2).
+	PostBootstrapConfig []byte
+
+	// Assets holds the loaded assets like certs and keys.
+	Assets map[string][]byte
+}
+
+type TemplateData struct {
+	ManifestConfig
+	FileConfig
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go
new file mode 100644
index 00000000000..d025cd64617
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/generic.go
@@ -0,0 +1,151 @@
+package options
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"text/template"
+
+	"github.com/ghodss/yaml"
+	"github.com/openshift/library-go/pkg/assets"
+	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+	"github.com/spf13/pflag"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GenericOptions contains the generic render command options.
+type GenericOptions struct {
+	DefaultFile                   string
+	BootstrapOverrideFile         string
+	PostBootstrapOverrideFile     string
+	AdditionalConfigOverrideFiles []string
+
+	ConfigOutputFile string
+
+	TemplatesDir  string
+	AssetInputDir string
+	AssetOutputDir string
+}
+
+type Template struct {
+	FileName string
+	Content  []byte
+}
+
+// NewGenericOptions returns a default set of generic options.
+func NewGenericOptions() *GenericOptions {
+	return &GenericOptions{
+		TemplatesDir: "/usr/share/bootkube/manifests",
+	}
+}
+
+// AddFlags adds the generic flags to the flagset.
+func (o *GenericOptions) AddFlags(fs *pflag.FlagSet, configGVK schema.GroupVersionKind) {
+	fs.StringVar(&o.AssetOutputDir, "asset-output-dir", o.AssetOutputDir, "Output path for rendered manifests.")
+	fs.StringVar(&o.AssetInputDir, "asset-input-dir", o.AssetInputDir, "A path to a directory with certificates and secrets.")
+	fs.StringVar(&o.TemplatesDir, "templates-input-dir", o.TemplatesDir, "A path to a directory with manifest templates.")
+	fs.StringSliceVar(&o.AdditionalConfigOverrideFiles, "config-override-files", o.AdditionalConfigOverrideFiles,
+		fmt.Sprintf("Additional sparse %s files for customization through the installer, merged into the default config in the given order.", gvkOutput{configGVK}))
+	fs.StringVar(&o.ConfigOutputFile, "config-output-file", o.ConfigOutputFile, fmt.Sprintf("Output path for the %s yaml file.", gvkOutput{configGVK}))
+}
+
+type gvkOutput struct {
+	schema.GroupVersionKind
+}
+
+func (gvk gvkOutput) String() string {
+	return fmt.Sprintf("%s.%s/%s", gvk.GroupVersionKind.Kind, gvk.GroupVersionKind.Group, gvk.GroupVersionKind.Version)
+}
+
+// Complete fills in missing values before execution.
+func (o *GenericOptions) Complete() error {
+	return nil
+}
+
+// Validate verifies the inputs.
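+// It is meant to run after Complete and before ApplyTo.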
+func (o *GenericOptions) Validate() error {
+	if len(o.AssetInputDir) == 0 {
+		return errors.New("missing required flag: --asset-input-dir")
+	}
+	if len(o.AssetOutputDir) == 0 {
+		return errors.New("missing required flag: --asset-output-dir")
+	}
+	if len(o.TemplatesDir) == 0 {
+		return errors.New("missing required flag: --templates-input-dir")
+	}
+	if len(o.ConfigOutputFile) == 0 {
+		return errors.New("missing required flag: --config-output-file")
+	}
+
+	return nil
+}
+
+// ApplyTo applies the options to the given config struct using the provided text/template data.
+func (o *GenericOptions) ApplyTo(cfg *FileConfig, defaultConfig, bootstrapOverrides, postBootstrapOverrides Template, templateData interface{}, specialCases map[string]resourcemerge.MergeFunc) error {
+	var err error
+
+	cfg.BootstrapConfig, err = o.configFromDefaultsPlusOverride(defaultConfig, bootstrapOverrides, templateData, specialCases)
+	if err != nil {
+		return fmt.Errorf("failed to generate bootstrap config (phase 1): %v", err)
+	}
+
+	if cfg.PostBootstrapConfig, err = o.configFromDefaultsPlusOverride(defaultConfig, postBootstrapOverrides, templateData, specialCases); err != nil {
+		return fmt.Errorf("failed to generate post-bootstrap config (phase 2): %v", err)
+	}
+
+	// load raw assets like certs and keys
+	if cfg.Assets, err = assets.LoadFilesRecursively(o.AssetInputDir); err != nil {
+		return fmt.Errorf("failed loading assets from %q: %v", o.AssetInputDir, err)
+	}
+
+	return nil
+}
+
+func (o *GenericOptions) configFromDefaultsPlusOverride(defaultConfig, overrides Template, templateData interface{}, specialCases map[string]resourcemerge.MergeFunc) ([]byte, error) {
+	defaultConfigContent, err := renderTemplate(defaultConfig, templateData)
+	if err != nil {
+		return nil, fmt.Errorf("failed to render default config file %q as text/template: %v", defaultConfig.FileName, err)
+	}
+
+	overridesContent, err := renderTemplate(overrides, templateData)
+	if err != nil {
+		return nil, fmt.Errorf("failed to render config override file %q as text/template: %v", overrides.FileName, err)
+	}
+	configs := [][]byte{defaultConfigContent, overridesContent}
+	for _, fname := range o.AdditionalConfigOverrideFiles {
+		bs, err := ioutil.ReadFile(fname)
+		if err != nil {
+			return nil, fmt.Errorf("failed to load config overrides at %q: %v", fname, err)
+		}
+		overrides, err := renderTemplate(Template{fname, bs}, templateData)
+		if err != nil {
+			return nil, fmt.Errorf("failed to render config overrides file %q as text/template: %v", fname, err)
+		}
+
+		configs = append(configs, overrides)
+	}
+	mergedConfig, err := resourcemerge.MergeProcessConfig(specialCases, configs...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to merge configs: %v", err)
+	}
+	yml, err := yaml.JSONToYAML(mergedConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return yml, nil
+}
+
+func renderTemplate(tpl Template, data interface{}) ([]byte, error) {
+	tmpl, err := template.New(tpl.FileName).Parse(string(tpl.Content))
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	if err := tmpl.Execute(&buf, data); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go
new file mode 100644
index 00000000000..7c58c648b92
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go
@@ -0,0 +1,98 @@
+package options
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/spf13/pflag"
+)
+
+// ManifestOptions contains the values that influence manifest contents.
+type ManifestOptions struct {
+	Namespace             string
+	Image, OperatorImage  string
+	ImagePullPolicy       string
+	ConfigHostPath        string
+	ConfigFileName        string
+	CloudProviderHostPath string
+	SecretsHostPath       string
+}
+
+// NewManifestOptions returns default values for ManifestOptions.
+func NewManifestOptions(componentName, image string) *ManifestOptions {
+	return &ManifestOptions{
+		Namespace:             fmt.Sprintf("openshift-%s", componentName),
+		Image:                 image,
+		ImagePullPolicy:       "IfNotPresent",
+		ConfigHostPath:        "/etc/kubernetes/bootstrap-configs",
+		ConfigFileName:        fmt.Sprintf("%s-config.yaml", componentName),
+		CloudProviderHostPath: "/etc/kubernetes/cloud",
+		SecretsHostPath:       "/etc/kubernetes/bootstrap-secrets",
+	}
+}
+
+// AddFlags adds the manifest-related flags to the flagset.
+func (o *ManifestOptions) AddFlags(fs *pflag.FlagSet, humanReadableComponentName string) {
+	fs.StringVar(&o.Namespace, "manifest-namespace", o.Namespace,
+		fmt.Sprintf("Target namespace for phase 3 %s pods.", humanReadableComponentName))
+	fs.StringVar(&o.Image, "manifest-image", o.Image,
+		fmt.Sprintf("Image to use for the %s.", humanReadableComponentName))
+	fs.StringVar(&o.OperatorImage, "manifest-operator-image", o.OperatorImage,
+		fmt.Sprintf("Operator image to use for the %s.", humanReadableComponentName))
+	fs.StringVar(&o.ImagePullPolicy, "manifest-image-pull-policy", o.ImagePullPolicy,
+		fmt.Sprintf("Image pull policy to use for the %s.", humanReadableComponentName))
+	fs.StringVar(&o.ConfigHostPath, "manifest-config-host-path", o.ConfigHostPath,
+		fmt.Sprintf("A host path mounted into the %s pods to hold a config file.", humanReadableComponentName))
+	fs.StringVar(&o.SecretsHostPath, "manifest-secrets-host-path", o.SecretsHostPath,
+		fmt.Sprintf("A host path mounted into the %s pods to hold secrets.", humanReadableComponentName))
+	fs.StringVar(&o.ConfigFileName, "manifest-config-file-name", o.ConfigFileName,
+		"The config file name inside the manifest-config-host-path.")
+	fs.StringVar(&o.CloudProviderHostPath, "manifest-cloud-provider-host-path", o.CloudProviderHostPath,
+		fmt.Sprintf("A host path mounted into the %s pods to hold cloud provider configuration.", humanReadableComponentName))
+}
+
+// Complete fills in missing values before execution.
+func (o *ManifestOptions) Complete() error {
+	return nil
+}
+
+// Validate verifies the inputs.
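+// All manifest flags are required, so validation fails fast on the first empty value.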
+func (o *ManifestOptions) Validate() error {
+	if len(o.Namespace) == 0 {
+		return errors.New("missing required flag: --manifest-namespace")
+	}
+	if len(o.Image) == 0 {
+		return errors.New("missing required flag: --manifest-image")
+	}
+	if len(o.ImagePullPolicy) == 0 {
+		return errors.New("missing required flag: --manifest-image-pull-policy")
+	}
+	if len(o.ConfigHostPath) == 0 {
+		return errors.New("missing required flag: --manifest-config-host-path")
+	}
+	if len(o.ConfigFileName) == 0 {
+		return errors.New("missing required flag: --manifest-config-file-name")
+	}
+	if len(o.CloudProviderHostPath) == 0 {
+		return errors.New("missing required flag: --manifest-cloud-provider-host-path")
+	}
+	if len(o.SecretsHostPath) == 0 {
+		return errors.New("missing required flag: --manifest-secrets-host-path")
+	}
+
+	return nil
+}
+
+// ApplyTo applies the options to the given config struct.
+func (o *ManifestOptions) ApplyTo(cfg *ManifestConfig) error {
+	cfg.Namespace = o.Namespace
+	cfg.Image = o.Image
+	cfg.OperatorImage = o.OperatorImage
+	cfg.ImagePullPolicy = o.ImagePullPolicy
+	cfg.ConfigHostPath = o.ConfigHostPath
+	cfg.ConfigFileName = o.ConfigFileName
+	cfg.CloudProviderHostPath = o.CloudProviderHostPath
+	cfg.SecretsHostPath = o.SecretsHostPath
+
+	return nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/render.go b/vendor/github.com/openshift/library-go/pkg/operator/render/render.go
new file mode 100644
index 00000000000..3bbd7a03a43
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/render/render.go
@@ -0,0 +1,31 @@
+package render
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/openshift/library-go/pkg/assets"
+	"github.com/openshift/library-go/pkg/operator/render/options"
+)
+
+// WriteFiles writes the manifests and the bootstrap config file.
+func WriteFiles(opt *options.GenericOptions, fileConfig *options.FileConfig, templateData interface{}, additionalPredicates ...assets.FileInfoPredicate) error {
+	// write assets
+	for _, manifestDir := range []string{"bootstrap-manifests", "manifests"} {
+		manifests, err := assets.New(filepath.Join(opt.TemplatesDir, manifestDir), templateData, append(additionalPredicates, assets.OnlyYaml)...)
+ if err != nil { + return fmt.Errorf("failed rendering assets: %v", err) + } + if err := manifests.WriteFiles(filepath.Join(opt.AssetOutputDir, manifestDir)); err != nil { + return fmt.Errorf("failed writing assets to %q: %v", filepath.Join(opt.AssetOutputDir, manifestDir), err) + } + } + + // create bootstrap configuration + if err := ioutil.WriteFile(opt.ConfigOutputFile, fileConfig.BootstrapConfig, 0644); err != nil { + return fmt.Errorf("failed to write merged config to %q: %v", opt.ConfigOutputFile, err) + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go new file mode 100644 index 00000000000..4cb8fcef5d2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go @@ -0,0 +1,42 @@ +package resourceapply + +import ( + "k8s.io/klog" + + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + apiextclientv1beta1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyCustomResourceDefinition applies the required CustomResourceDefinition to the cluster. +func ApplyCustomResourceDefinition(client apiextclientv1beta1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextv1beta1.CustomResourceDefinition) (*apiextv1beta1.CustomResourceDefinition, bool, error) { + existing, err := client.CustomResourceDefinitions().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.CustomResourceDefinitions().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureCustomResourceDefinition(modified, existingCopy, *required) + if !*modified { + return existing, false, nil + } + + if klog.V(4) { + klog.Infof("CustomResourceDefinition %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.CustomResourceDefinitions().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go new file mode 100644 index 00000000000..1d69176ed1e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go @@ -0,0 +1,47 @@ +package resourceapply + +import ( + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyAPIService merges objectmeta and requires apiservice coordinates. 
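That covers the service reference, the priorities, and the InsecureSkipTLSVerify flag.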
It does not touch CA bundles, which should be managed via service CA controller. +func ApplyAPIService(client apiregistrationv1client.APIServicesGetter, recorder events.Recorder, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) { + existing, err := client.APIServices().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.APIServices().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + serviceSame := equality.Semantic.DeepEqual(existingCopy.Spec.Service, required.Spec.Service) + prioritySame := existingCopy.Spec.VersionPriority == required.Spec.VersionPriority && existingCopy.Spec.GroupPriorityMinimum == required.Spec.GroupPriorityMinimum + insecureSame := existingCopy.Spec.InsecureSkipTLSVerify == required.Spec.InsecureSkipTLSVerify + // there was no change to metadata, the service and priorities were right + if !*modified && serviceSame && prioritySame && insecureSame { + return existingCopy, false, nil + } + + existingCopy.Spec = required.Spec + + if klog.V(4) { + klog.Infof("APIService %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + actual, err := client.APIServices().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go new file mode 100644 index 00000000000..91630dba51d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go @@ -0,0 +1,114 @@ +package resourceapply + +import ( + "k8s.io/klog" + + appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyDeployment merges objectmeta and requires matching generation. 
It returns the final Object, whether any change was made, and an error
+func ApplyDeployment(client appsclientv1.DeploymentsGetter, recorder events.Recorder, required *appsv1.Deployment, expectedGeneration int64,
+	forceRollout bool) (*appsv1.Deployment, bool, error) {
+	if required.Annotations == nil {
+		required.Annotations = map[string]string{}
+	}
+	required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+	existing, err := client.Deployments(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Deployments(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	// there was no change to metadata, the generation was right, and we weren't asked to force the deployment
+	if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+		return existingCopy, false, nil
+	}
+
+	// at this point we know that we're going to perform a write. We're just trying to get the object correct
+	toWrite := existingCopy // shallow copy so the code reads easier
+	toWrite.Spec = *required.Spec.DeepCopy()
+	if forceRollout {
+		// forces a deployment
+		forceString := string(uuid.NewUUID())
+		if toWrite.Annotations == nil {
+			toWrite.Annotations = map[string]string{}
+		}
+		if toWrite.Spec.Template.Annotations == nil {
+			toWrite.Spec.Template.Annotations = map[string]string{}
+		}
+		toWrite.Annotations["operator.openshift.io/force"] = forceString
+		toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString
+	}
+
+	if klog.V(4) {
+		klog.Infof("Deployment %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite))
+	}
+
+	actual, err := client.Deployments(required.Namespace).Update(toWrite)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyDaemonSet merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error
+func ApplyDaemonSet(client appsclientv1.DaemonSetsGetter, recorder events.Recorder, required *appsv1.DaemonSet, expectedGeneration int64, forceRollout bool) (*appsv1.DaemonSet, bool, error) {
+	if required.Annotations == nil {
+		required.Annotations = map[string]string{}
+	}
+	required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image
+	existing, err := client.DaemonSets(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.DaemonSets(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	// there was no change to metadata, the generation was right, and we weren't asked to force the deployment
+	if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout {
+		return existingCopy, false, nil
+	}
+
+	// at this point we know that we're going to perform a write.
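Either the metadata changed, the generation moved past the expected one, or a rollout was forced.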
We're just trying to get the object correct + toWrite := existingCopy // shallow copy so the code reads easier + toWrite.Spec = *required.Spec.DeepCopy() + if forceRollout { + // forces a deployment + forceString := string(uuid.NewUUID()) + if toWrite.Annotations == nil { + toWrite.Annotations = map[string]string{} + } + if toWrite.Spec.Template.Annotations == nil { + toWrite.Spec.Template.Annotations = map[string]string{} + } + toWrite.Annotations["operator.openshift.io/force"] = forceString + toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString + } + + if klog.V(4) { + klog.Infof("DaemonSet %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite)) + } + actual, err := client.DaemonSets(required.Namespace).Update(toWrite) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go new file mode 100644 index 00000000000..51b55f96b0f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -0,0 +1,297 @@ +package resourceapply + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyNamespace merges objectmeta, does not worry about anything else +func ApplyNamespace(client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) { + existing, err := client.Namespaces().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Namespaces().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + + if klog.V(4) { + klog.Infof("Namespace %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.Namespaces().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyService merges objectmeta and requires +// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update +// TODO I've special cased the selector for now +func ApplyService(client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) { + existing, err := client.Services(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Services(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + 
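// only the selector and service Type are reconciled below; the rest of the spec is intentionally left untouched (see the TODOs above)
+	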
selectorSame := equality.Semantic.DeepEqual(existingCopy.Spec.Selector, required.Spec.Selector) + + typeSame := false + requiredIsEmpty := len(required.Spec.Type) == 0 + existingCopyIsCluster := existingCopy.Spec.Type == corev1.ServiceTypeClusterIP + if (requiredIsEmpty && existingCopyIsCluster) || equality.Semantic.DeepEqual(existingCopy.Spec.Type, required.Spec.Type) { + typeSame = true + } + + if selectorSame && typeSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Spec.Selector = required.Spec.Selector + existingCopy.Spec.Type = required.Spec.Type // if this is different, the update will fail. Status will indicate it. + + if klog.V(4) { + klog.Infof("Service %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + + actual, err := client.Services(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyPod merges objectmeta, does not worry about anything else +func ApplyPod(client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) { + existing, err := client.Pods(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Pods(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + + if klog.V(4) { + klog.Infof("Pod %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + + actual, err := client.Pods(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyServiceAccount merges objectmeta, does not worry about anything else +func ApplyServiceAccount(client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) { + existing, err := client.ServiceAccounts(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ServiceAccounts(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + return existingCopy, false, nil + } + if klog.V(4) { + klog.Infof("ServiceAccount %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + actual, err := client.ServiceAccounts(required.Namespace).Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyConfigMap merges objectmeta, requires data +func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) { + existing, err := client.ConfigMaps(required.Namespace).Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.ConfigMaps(required.Namespace).Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + 
}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+	var modifiedKeys []string
+	for existingCopyKey, existingCopyValue := range existingCopy.Data {
+		if requiredValue, ok := required.Data[existingCopyKey]; !ok || (existingCopyValue != requiredValue) {
+			modifiedKeys = append(modifiedKeys, "data."+existingCopyKey)
+		}
+	}
+	for existingCopyKey, existingCopyBinValue := range existingCopy.BinaryData {
+		if requiredBinValue, ok := required.BinaryData[existingCopyKey]; !ok || !bytes.Equal(existingCopyBinValue, requiredBinValue) {
+			modifiedKeys = append(modifiedKeys, "binaryData."+existingCopyKey)
+		}
+	}
+	for requiredKey := range required.Data {
+		if _, ok := existingCopy.Data[requiredKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "data."+requiredKey)
+		}
+	}
+	for requiredBinKey := range required.BinaryData {
+		if _, ok := existingCopy.BinaryData[requiredBinKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "binaryData."+requiredBinKey)
+		}
+	}
+
+	dataSame := len(modifiedKeys) == 0
+	if dataSame && !*modified {
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+	existingCopy.BinaryData = required.BinaryData
+
+	actual, err := client.ConfigMaps(required.Namespace).Update(existingCopy)
+
+	var details string
+	if !dataSame {
+		sort.Sort(sort.StringSlice(modifiedKeys))
+		details = fmt.Sprintf("caused by changes in %v", strings.Join(modifiedKeys, ","))
+	}
+	if klog.V(4) {
+		klog.Infof("ConfigMap %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+	}
+	reportUpdateEvent(recorder, required, err, details)
+	return actual, true, err
+}
+
+// ApplySecret merges objectmeta, requires data
+func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) {
+	if len(required.StringData) > 0 {
+		return nil, false, fmt.Errorf("Secret.stringData is not supported")
+	}
+
+	existing, err := client.Secrets(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Secrets(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+	dataSame := equality.Semantic.DeepEqual(existingCopy.Data, required.Data)
+	if dataSame && !*modified {
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+
+	if klog.V(4) {
+		klog.Infof("Secret %s/%s changes: %v", required.Namespace, required.Name, JSONPatchSecretNoError(existing, required))
+	}
+	actual, err := client.Secrets(required.Namespace).Update(existingCopy)
+
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+func SyncConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+	source, err := client.ConfigMaps(sourceNamespace).Get(sourceName, metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		deleteErr := client.ConfigMaps(targetNamespace).Delete(targetName, nil)
+		if _, getErr := 
+func SyncConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+	source, err := client.ConfigMaps(sourceNamespace).Get(sourceName, metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		if _, getErr := client.ConfigMaps(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) {
+			return nil, true, nil
+		}
+		deleteErr := client.ConfigMaps(targetNamespace).Delete(targetName, nil)
+		if apierrors.IsNotFound(deleteErr) {
+			return nil, false, nil
+		}
+		if deleteErr == nil {
+			recorder.Eventf("TargetConfigDeleted", "Deleted target configmap %s/%s because source config does not exist", targetNamespace, targetName)
+			return nil, true, nil
+		}
+		return nil, false, deleteErr
+	case err != nil:
+		return nil, false, err
+	default:
+		source.Namespace = targetNamespace
+		source.Name = targetName
+		source.ResourceVersion = ""
+		source.OwnerReferences = ownerRefs
+		return ApplyConfigMap(client, recorder, source)
+	}
+}
+
+func SyncSecret(client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) {
+	source, err := client.Secrets(sourceNamespace).Get(sourceName, metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		if _, getErr := client.Secrets(targetNamespace).Get(targetName, metav1.GetOptions{}); getErr != nil && apierrors.IsNotFound(getErr) {
+			return nil, true, nil
+		}
+		deleteErr := client.Secrets(targetNamespace).Delete(targetName, nil)
+		if apierrors.IsNotFound(deleteErr) {
+			return nil, false, nil
+		}
+		if deleteErr == nil {
+			recorder.Eventf("TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName)
+			return nil, true, nil
+		}
+		return nil, false, deleteErr
+	case err != nil:
+		return nil, false, err
+	default:
+		source.Namespace = targetNamespace
+		source.Name = targetName
+		source.ResourceVersion = ""
+		source.OwnerReferences = ownerRefs
+		return ApplySecret(client, recorder, source)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go
new file mode 100644
index 00000000000..127ac8be334
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core_test.go
@@ -0,0 +1,203 @@
+package resourceapply
+
+import (
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/fake"
+	clienttesting "k8s.io/client-go/testing"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+func TestApplyConfigMap(t *testing.T) {
+	tests := []struct {
+		name     string
+		existing []runtime.Object
+		input    *corev1.ConfigMap
+
+		expectedModified bool
+		verifyActions    func(actions []clienttesting.Action, t *testing.T)
+	}{
+		{
+			name: "create",
+			input: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo"},
+			},
+
+			expectedModified: true,
+			verifyActions: func(actions []clienttesting.Action, t *testing.T) {
+				if len(actions) != 2 {
+					t.Fatal(spew.Sdump(actions))
+				}
+				if !actions[0].Matches("get", "configmaps") || actions[0].(clienttesting.GetAction).GetName() != "foo" {
+					t.Error(spew.Sdump(actions))
+				}
+				if !actions[1].Matches("create", "configmaps") {
+					t.Error(spew.Sdump(actions))
+				}
+				expected := &corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo"},
+				}
+				actual := actions[1].(clienttesting.CreateAction).GetObject().(*corev1.ConfigMap)
+				if !equality.Semantic.DeepEqual(expected, actual) {
t.Error(JSONPatchNoError(expected, actual)) + } + }, + }, + { + name: "skip on extra label", + existing: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + }, + }, + input: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo"}, + }, + + expectedModified: false, + verifyActions: func(actions []clienttesting.Action, t *testing.T) { + if len(actions) != 1 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "configmaps") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + t.Error(spew.Sdump(actions)) + } + }, + }, + { + name: "update on missing label", + existing: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + }, + }, + input: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"new": "merge"}}, + }, + + expectedModified: true, + verifyActions: func(actions []clienttesting.Action, t *testing.T) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "configmaps") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("update", "configmaps") { + t.Error(spew.Sdump(actions)) + } + expected := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone", "new": "merge"}}, + } + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap) + if !equality.Semantic.DeepEqual(expected, actual) { + t.Error(JSONPatchNoError(expected, actual)) + } + }, + }, + { + name: "update on mismatch data", + existing: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + }, + }, + input: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo"}, + Data: map[string]string{ + "configmap": "value", + }, + }, + + expectedModified: true, + verifyActions: func(actions []clienttesting.Action, t *testing.T) { + if len(actions) != 2 { + t.Fatal(spew.Sdump(actions)) + } + if !actions[0].Matches("get", "configmaps") || actions[0].(clienttesting.GetAction).GetName() != "foo" { + t.Error(spew.Sdump(actions)) + } + if !actions[1].Matches("update", "configmaps") { + t.Error(spew.Sdump(actions)) + } + expected := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + Data: map[string]string{ + "configmap": "value", + }, + } + actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap) + if !equality.Semantic.DeepEqual(expected, actual) { + t.Error(JSONPatchNoError(expected, actual)) + } + }, + }, + { + name: "update on mismatch binary data", + existing: []runtime.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}}, + Data: map[string]string{ + "configmap": "value", + }, + }, + }, + input: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo"}, + Data: map[string]string{ + "configmap": "value", + }, + BinaryData: map[string][]byte{ + "binconfigmap": []byte("value"), + }, + }, + + expectedModified: true, + verifyActions: func(actions 
[]clienttesting.Action, t *testing.T) {
+				if len(actions) != 2 {
+					t.Fatal(spew.Sdump(actions))
+				}
+				if !actions[0].Matches("get", "configmaps") || actions[0].(clienttesting.GetAction).GetName() != "foo" {
+					t.Error(spew.Sdump(actions))
+				}
+				if !actions[1].Matches("update", "configmaps") {
+					t.Error(spew.Sdump(actions))
+				}
+				expected := &corev1.ConfigMap{
+					ObjectMeta: metav1.ObjectMeta{Namespace: "one-ns", Name: "foo", Labels: map[string]string{"extra": "leave-alone"}},
+					Data: map[string]string{
+						"configmap": "value",
+					},
+					BinaryData: map[string][]byte{
+						"binconfigmap": []byte("value"),
+					},
+				}
+				actual := actions[1].(clienttesting.UpdateAction).GetObject().(*corev1.ConfigMap)
+				if !equality.Semantic.DeepEqual(expected, actual) {
+					t.Error(JSONPatchNoError(expected, actual))
+				}
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			client := fake.NewSimpleClientset(test.existing...)
+			_, actualModified, err := ApplyConfigMap(client.CoreV1(), events.NewInMemoryRecorder("test"), test.input)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if test.expectedModified != actualModified {
+				t.Errorf("expected %v, got %v", test.expectedModified, actualModified)
+			}
+			test.verifyActions(client.Actions(), t)
+		})
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go
new file mode 100644
index 00000000000..55142ad2b64
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go
@@ -0,0 +1,86 @@
+package resourceapply
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/klog"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	kubescheme "k8s.io/client-go/kubernetes/scheme"
+
+	openshiftapi "github.com/openshift/api"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+var (
+	openshiftScheme = runtime.NewScheme()
+)
+
+func init() {
+	if err := openshiftapi.Install(openshiftScheme); err != nil {
+		panic(err)
+	}
+}
+
+// guessObjectGroupKind returns the API group and kind for the passed runtime object.
+func guessObjectGroupKind(object runtime.Object) (string, string) {
+	if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 {
+		return gvk.Group, gvk.Kind
+	}
+	if kinds, _, _ := kubescheme.Scheme.ObjectKinds(object); len(kinds) > 0 {
+		return kinds[0].Group, kinds[0].Kind
+	}
+	if kinds, _, _ := openshiftScheme.ObjectKinds(object); len(kinds) > 0 {
+		return kinds[0].Group, kinds[0].Kind
+	}
+	return "unknown", "Object"
+}
+
+func reportCreateEvent(recorder events.Recorder, obj runtime.Object, originalErr error) {
+	reportingGroup, reportingKind := guessObjectGroupKind(obj)
+	if len(reportingGroup) != 0 {
+		reportingGroup = "." 
+ reportingGroup + } + accessor, err := meta.Accessor(obj) + if err != nil { + klog.Errorf("Failed to get accessor for %+v", obj) + return + } + namespace := "" + if len(accessor.GetNamespace()) > 0 { + namespace = " -n " + accessor.GetNamespace() + } + if originalErr == nil { + recorder.Eventf(fmt.Sprintf("%sCreated", reportingKind), "Created %s%s/%s%s because it was missing", reportingKind, reportingGroup, accessor.GetName(), namespace) + return + } + recorder.Warningf(fmt.Sprintf("%sCreateFailed", reportingKind), "Failed to create %s%s/%s%s: %v", reportingKind, reportingGroup, accessor.GetName(), namespace, originalErr) +} + +func reportUpdateEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) { + reportingGroup, reportingKind := guessObjectGroupKind(obj) + if len(reportingGroup) != 0 { + reportingGroup = "." + reportingGroup + } + accessor, err := meta.Accessor(obj) + if err != nil { + klog.Errorf("Failed to get accessor for %+v", obj) + return + } + namespace := "" + if len(accessor.GetNamespace()) > 0 { + namespace = " -n " + accessor.GetNamespace() + } + switch { + case originalErr != nil: + recorder.Warningf(fmt.Sprintf("%sUpdateFailed", reportingKind), "Failed to update %s%s/%s%s: %v", reportingKind, reportingGroup, accessor.GetName(), namespace, originalErr) + case len(details) == 0: + recorder.Eventf(fmt.Sprintf("%sUpdated", reportingKind), "Updated %s%s/%s%s because it changed", reportingKind, reportingGroup, accessor.GetName(), namespace) + default: + recorder.Eventf(fmt.Sprintf("%sUpdated", reportingKind), "Updated %s%s/%s%s: %s", reportingKind, reportingGroup, accessor.GetName(), namespace, strings.Join(details, "\n")) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers_test.go new file mode 100644 index 00000000000..3c97310c76c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers_test.go @@ -0,0 +1,147 @@ +package resourceapply + +import ( + "errors" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/library-go/pkg/operator/events" +) + +func TestReportCreateEvent(t *testing.T) { + testErr := errors.New("test") + tests := []struct { + name string + object runtime.Object + err error + expectedEventMessage string + expectedEventReason string + }{ + { + name: "pod-with-error", + object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName"}}, + err: testErr, + expectedEventReason: "PodCreateFailed", + expectedEventMessage: "Failed to create Pod/podName: test", + }, + { + name: "pod-with-namespace", + object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName", Namespace: "nsName"}}, + err: testErr, + expectedEventReason: "PodCreateFailed", + expectedEventMessage: "Failed to create Pod/podName -n nsName: test", + }, + { + name: "pod-without-error", + object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName"}}, + expectedEventReason: "PodCreated", + expectedEventMessage: "Created Pod/podName because it was missing", + }, + { + name: "pod-with-namespace-without-error", + object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName", Namespace: "nsName"}}, + expectedEventReason: "PodCreated", + expectedEventMessage: "Created Pod/podName -n nsName because it was missing", + }, + } + + for _, test := range tests { + t.Run(test.name, 
func(t *testing.T) {
+			recorder := events.NewInMemoryRecorder("test")
+			reportCreateEvent(recorder, test.object, test.err)
+			recordedEvents := recorder.Events()
+
+			if eventCount := len(recordedEvents); eventCount != 1 {
+				t.Errorf("expected one event to be recorded, got %d", eventCount)
+			}
+
+			if recordedEvents[0].Message != test.expectedEventMessage {
+				t.Errorf("expected one event message %q, got %q", test.expectedEventMessage, recordedEvents[0].Message)
+			}
+
+			if recordedEvents[0].Reason != test.expectedEventReason {
+				t.Errorf("expected one event reason %q, got %q", test.expectedEventReason, recordedEvents[0].Reason)
+			}
+		})
+	}
+}
+
+func TestReportUpdateEvent(t *testing.T) {
+	testErr := errors.New("test")
+	tests := []struct {
+		name                 string
+		object               runtime.Object
+		err                  error
+		details              string
+		expectedEventMessage string
+		expectedEventReason  string
+	}{
+		{
+			name:                 "pod-with-error",
+			object:               &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName"}},
+			err:                  testErr,
+			expectedEventReason:  "PodUpdateFailed",
+			expectedEventMessage: "Failed to update Pod/podName: test",
+		},
+		{
+			name:                 "pod-with-namespace",
+			object:               &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName", Namespace: "nsName"}},
+			err:                  testErr,
+			expectedEventReason:  "PodUpdateFailed",
+			expectedEventMessage: "Failed to update Pod/podName -n nsName: test",
+		},
+		{
+			name:                 "pod-without-error",
+			object:               &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName"}},
+			expectedEventReason:  "PodUpdated",
+			expectedEventMessage: "Updated Pod/podName because it changed",
+		},
+		{
+			name:                 "pod-with-namespace-without-error",
+			object:               &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName", Namespace: "nsName"}},
+			expectedEventReason:  "PodUpdated",
+			expectedEventMessage: "Updated Pod/podName -n nsName because it changed",
+		},
+		{
+			name:                 "pod-with-details-without-error",
+			object:               &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName"}},
+			details:              "because reasons",
+			expectedEventReason:  "PodUpdated",
+			expectedEventMessage: "Updated Pod/podName: because reasons",
+		},
+		{
+			name:                 "pod-with-namespace-and-details-without-error",
+			object:               &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "podName", Namespace: "nsName"}},
+			details:              "because reasons",
+			expectedEventReason:  "PodUpdated",
+			expectedEventMessage: "Updated Pod/podName -n nsName: because reasons",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			recorder := events.NewInMemoryRecorder("test")
+			if len(test.details) == 0 {
+				reportUpdateEvent(recorder, test.object, test.err)
+			} else {
+				reportUpdateEvent(recorder, test.object, test.err, test.details)
+			}
+			recordedEvents := recorder.Events()
+
+			if eventCount := len(recordedEvents); eventCount != 1 {
+				t.Errorf("expected one event to be recorded, got %d", eventCount)
+			}
+
+			if recordedEvents[0].Message != test.expectedEventMessage {
+				t.Errorf("expected one event message %q, got %q", test.expectedEventMessage, recordedEvents[0].Message)
+			}
+
+			if recordedEvents[0].Reason != test.expectedEventReason {
+				t.Errorf("expected one event reason %q, got %q", test.expectedEventReason, recordedEvents[0].Reason)
+			}
+		})
+	}
+}
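+
+// Illustrative only (not part of the upstream file): for a namespaced object,
+// reportUpdateEvent emits events shaped like
+//
+//	reason:  "PodUpdated"
+//	message: "Updated Pod/podName -n nsName because it changed"
+//
+// which is the contract the table-driven cases above pin down.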
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go
new file mode 100644
index 00000000000..4e91c2bb5eb
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go
@@ -0,0 +1,87 @@
+package resourceapply
+
+import (
+	"fmt"
+
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/kubernetes"
+
+	"github.com/openshift/api"
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+var (
+	genericScheme = runtime.NewScheme()
+	genericCodecs = serializer.NewCodecFactory(genericScheme)
+	genericCodec  = genericCodecs.UniversalDeserializer()
+)
+
+func init() {
+	utilruntime.Must(api.InstallKube(genericScheme))
+}
+
+type AssetFunc func(name string) ([]byte, error)
+
+type ApplyResult struct {
+	File    string
+	Type    string
+	Result  runtime.Object
+	Changed bool
+	Error   error
+}
+
+// ApplyDirectly applies the given manifest files to the API server.
+func ApplyDirectly(kubeClient kubernetes.Interface, recorder events.Recorder, manifests AssetFunc, files ...string) []ApplyResult {
+	ret := []ApplyResult{}
+
+	for _, file := range files {
+		result := ApplyResult{File: file}
+		objBytes, err := manifests(file)
+		if err != nil {
+			result.Error = fmt.Errorf("missing %q: %v", file, err)
+			ret = append(ret, result)
+			continue
+		}
+		requiredObj, _, err := genericCodec.Decode(objBytes, nil, nil)
+		if err != nil {
+			result.Error = fmt.Errorf("cannot decode %q: %v", file, err)
+			ret = append(ret, result)
+			continue
+		}
+		result.Type = fmt.Sprintf("%T", requiredObj)
+
+		// NOTE: Do not add CR resources into this switch, otherwise the protobuf client can cause problems.
+		switch t := requiredObj.(type) {
+		case *corev1.Namespace:
+			result.Result, result.Changed, result.Error = ApplyNamespace(kubeClient.CoreV1(), recorder, t)
+		case *corev1.Service:
+			result.Result, result.Changed, result.Error = ApplyService(kubeClient.CoreV1(), recorder, t)
+		case *corev1.Pod:
+			result.Result, result.Changed, result.Error = ApplyPod(kubeClient.CoreV1(), recorder, t)
+		case *corev1.ServiceAccount:
+			result.Result, result.Changed, result.Error = ApplyServiceAccount(kubeClient.CoreV1(), recorder, t)
+		case *corev1.ConfigMap:
+			result.Result, result.Changed, result.Error = ApplyConfigMap(kubeClient.CoreV1(), recorder, t)
+		case *corev1.Secret:
+			result.Result, result.Changed, result.Error = ApplySecret(kubeClient.CoreV1(), recorder, t)
+		case *rbacv1.ClusterRole:
+			result.Result, result.Changed, result.Error = ApplyClusterRole(kubeClient.RbacV1(), recorder, t)
+		case *rbacv1.ClusterRoleBinding:
+			result.Result, result.Changed, result.Error = ApplyClusterRoleBinding(kubeClient.RbacV1(), recorder, t)
+		case *rbacv1.Role:
+			result.Result, result.Changed, result.Error = ApplyRole(kubeClient.RbacV1(), recorder, t)
+		case *rbacv1.RoleBinding:
+			result.Result, result.Changed, result.Error = ApplyRoleBinding(kubeClient.RbacV1(), recorder, t)
+		default:
+			result.Error = fmt.Errorf("unhandled type %T", requiredObj)
+		}
+
+		ret = append(ret, result)
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic_test.go
new file mode 100644
index 00000000000..1a62aff68fa
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic_test.go
@@ -0,0 +1,46 @@
+package resourceapply
+
+import (
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+
+	"k8s.io/client-go/kubernetes/fake"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+func TestApplyDirectly(t *testing.T) {
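+	// Decode a namespace manifest through the same universal deserializer that
+	// ApplyDirectly uses; this test only asserts that core/v1 YAML decodes cleanly.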
requiredObj, gvk, err := genericCodec.Decode([]byte(`apiVersion: v1
+kind: Namespace
+metadata:
+  name: openshift-apiserver
+  labels:
+    openshift.io/run-level: "1"
+`), nil, nil)
+	t.Log(spew.Sdump(requiredObj))
+	t.Log(spew.Sdump(gvk))
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestApplyDirectlyUnhandledType(t *testing.T) {
+	fakeClient := fake.NewSimpleClientset()
+	content := func(name string) ([]byte, error) {
+		return []byte(`apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: sample-claim
+  labels:
+    openshift.io/run-level: "1"
+`), nil
+	}
+	recorder := events.NewInMemoryRecorder("")
+	ret := ApplyDirectly(fakeClient, recorder, content, "pvc")
+	if ret[0].Error == nil {
+		t.Fatal("missing expected error")
+	} else if ret[0].Error.Error() != "unhandled type *v1.PersistentVolumeClaim" {
+		t.Fatal(ret[0].Error)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
new file mode 100644
index 00000000000..ac9699affea
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
@@ -0,0 +1,70 @@
+package resourceapply
+
+import (
+	"fmt"
+
+	patch "github.com/evanphx/json-patch"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// JSONPatchNoError generates a JSON patch between the original and modified objects and returns the patch as a string.
+//
+// Note: in case of error, the returned string will contain the error messages.
+func JSONPatchNoError(original, modified runtime.Object) string {
+	if original == nil {
+		return "original object is nil"
+	}
+	if modified == nil {
+		return "modified object is nil"
+	}
+	originalJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, original)
+	if err != nil {
+		return fmt.Sprintf("unable to encode original to JSON: %v", err)
+	}
+	modifiedJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, modified)
+	if err != nil {
+		return fmt.Sprintf("unable to encode modified to JSON: %v", err)
+	}
+	patchBytes, err := patch.CreateMergePatch(originalJSON, modifiedJSON)
+	if err != nil {
+		return fmt.Sprintf("unable to create JSON patch: %v", err)
+	}
+	return string(patchBytes)
+}
+
+// JSONPatchSecretNoError generates a JSON patch between the original and modified secrets, hiding their data,
+// and returns the patch as a string.
+//
+// Note: in case of error, the returned string will contain the error messages.
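+//
+// Illustrative output only (an assumption, not pinned by a test): a secret whose
+// "token" key changed renders as {"data":{"token":"TU9ESUZJRUQ="}}, i.e. the
+// base64-encoded "MODIFIED" placeholder rather than the real contents.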
+func JSONPatchSecretNoError(original, modified *corev1.Secret) string { + if original == nil { + return "original object is nil" + } + if modified == nil { + return "modified object is nil" + } + + safeModified := modified.DeepCopy() + safeOriginal := original.DeepCopy() + + for s := range safeOriginal.Data { + safeOriginal.Data[s] = []byte("OLD") + } + for s := range safeModified.Data { + if _, preoriginal := original.Data[s]; !preoriginal { + safeModified.Data[s] = []byte("NEW") + } else if !equality.Semantic.DeepEqual(original.Data[s], safeModified.Data[s]) { + safeModified.Data[s] = []byte("MODIFIED") + } else { + safeModified.Data[s] = []byte("OLD") + } + } + + return JSONPatchNoError(safeOriginal, safeModified) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers_test.go new file mode 100644 index 00000000000..aed56998ee5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers_test.go @@ -0,0 +1,89 @@ +package resourceapply + +import ( + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestJSONPatch(t *testing.T) { + tests := []struct { + name string + original runtime.Object + modified runtime.Object + expected string + }{ + { + name: "simple diff in pod", + original: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Annotations: map[string]string{"foo": "bar"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + }, + }, + }, + }, + modified: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Annotations: map[string]string{"foo": "nobar"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + }, + }, + }, + }, + expected: `{"metadata":{"annotations":{"foo":"nobar"}}}`, + }, + { + name: "removing annotation in pod", + original: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Annotations: map[string]string{"foo": "bar"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + }, + }, + }, + }, + modified: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod"}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + }, + }, + }, + }, + expected: `{"metadata":{"annotations":null}}`, + }, + { + name: "modified is nil", + original: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Annotations: map[string]string{"foo": "bar"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + }, + }, + }, + }, + expected: `modified object is nil`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if output := JSONPatchNoError(test.original, test.modified); output != test.expected { + t.Errorf("returned string:\n%s\n\n does not match expected string:\n%s\n", output, test.expected) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go new file mode 100644 index 00000000000..51cadfbff87 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go @@ -0,0 +1,101 @@ +package resourceapply + +import ( + "fmt" + + "github.com/ghodss/yaml" + "github.com/imdario/mergo" + "k8s.io/klog" + + 
"k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + + "github.com/openshift/library-go/pkg/operator/events" +) + +var serviceMonitorGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"} + +func ensureServiceMonitorSpec(required, existing *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + requiredSpec, _, err := unstructured.NestedMap(required.UnstructuredContent(), "spec") + if err != nil { + return nil, false, err + } + existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec") + if err != nil { + return nil, false, err + } + + if err := mergo.Merge(&existingSpec, &requiredSpec); err != nil { + return nil, false, err + } + + if equality.Semantic.DeepEqual(existingSpec, requiredSpec) { + return existing, false, nil + } + + existingCopy := existing.DeepCopy() + if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), existingSpec, "spec"); err != nil { + return nil, true, err + } + + return existingCopy, true, nil +} + +// ApplyServiceMonitor applies the Prometheus service monitor. +func ApplyServiceMonitor(client dynamic.Interface, recorder events.Recorder, serviceMonitorBytes []byte) (bool, error) { + monitorJSON, err := yaml.YAMLToJSON(serviceMonitorBytes) + if err != nil { + return false, err + } + + monitorObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, monitorJSON) + if err != nil { + return false, err + } + + required, ok := monitorObj.(*unstructured.Unstructured) + if !ok { + return false, fmt.Errorf("unexpected object in %t", monitorObj) + } + + namespace := required.GetNamespace() + + existing, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(required.GetName(), metav1.GetOptions{}) + if errors.IsNotFound(err) { + _, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(required, metav1.CreateOptions{}) + if createErr != nil { + recorder.Warningf("ServiceMonitorCreateFailed", "Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v", createErr) + return true, createErr + } + recorder.Eventf("ServiceMonitorCreated", "Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing") + return true, nil + } + + existingCopy := existing.DeepCopy() + + updated, endpointsModified, err := ensureServiceMonitorSpec(required, existingCopy) + if err != nil { + return false, err + } + + if !endpointsModified { + return false, nil + } + + if klog.V(4) { + klog.Infof("ServiceMonitor %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, existingCopy)) + } + + if _, err = client.Resource(serviceMonitorGVR).Namespace(namespace).Update(updated, metav1.UpdateOptions{}); err != nil { + recorder.Warningf("ServiceMonitorUpdateFailed", "Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v", err) + return true, err + } + + recorder.Eventf("ServiceMonitorUpdated", "Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed") + return true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring_test.go new file mode 100644 index 00000000000..52cff2db8cb --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring_test.go @@ -0,0 +1,148 @@ +package resourceapply + +import ( + "reflect" + "sort" + "testing" + + "github.com/ghodss/yaml" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/util/diff" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynamicfake "k8s.io/client-go/dynamic/fake" + clienttesting "k8s.io/client-go/testing" + + "github.com/openshift/library-go/pkg/operator/events" +) + +const ( + fakeServiceMonitor = `apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: cluster-kube-apiserver + namespace: openshift-kube-apiserver +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + metricRelabelings: + - action: drop + regex: etcd_(debugging|disk|request|server).* + sourceLabels: + - __name__ + port: https + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + serverName: apiserver.openshift-kube-apiserver.svc + jobLabel: component + namespaceSelector: + matchNames: + - openshift-kube-apiserver + selector: + matchLabels: + app: openshift-kube-apiserver +` + fakeIncompleteServiceMonitor = `apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: cluster-kube-apiserver + namespace: openshift-kube-apiserver +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + metricRelabelings: + - action: drop + regex: etcd_(debugging|disk|request|server).* + sourceLabels: + - __name__ + port: https + scheme: https + jobLabel: component + namespaceSelector: + matchNames: + - wrong-name + selector: + matchLabels: + custom: custom-label + app: openshift-kube-apiserver +` +) + +func readServiceMonitorFromBytes(monitorBytes []byte) *unstructured.Unstructured { + monitorJSON, err := yaml.YAMLToJSON(monitorBytes) + if err != nil { + panic(err) + } + monitorObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, monitorJSON) + if err != nil { + panic(err) + } + required, ok := monitorObj.(*unstructured.Unstructured) + if !ok { + panic("unexpected object") + } + return required +} + +func TestApplyServiceMonitor(t *testing.T) { + dynamicScheme := runtime.NewScheme() + dynamicScheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ServiceMonitor"}, &unstructured.Unstructured{}) + + dynamicClient := dynamicfake.NewSimpleDynamicClient(dynamicScheme, readServiceMonitorFromBytes([]byte(fakeServiceMonitor))) + + modified, err := ApplyServiceMonitor(dynamicClient, events.NewInMemoryRecorder("monitor-test"), []byte(fakeIncompleteServiceMonitor)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !modified { + t.Fatalf("expected the service monitor will be modified, it was not") + } + + if len(dynamicClient.Actions()) != 2 { + t.Fatalf("expected 2 actions, got %d", len(dynamicClient.Actions())) + } + + _, isUpdate := dynamicClient.Actions()[1].(clienttesting.UpdateAction) + if !isUpdate { + t.Fatalf("expected second action to be update, got %+v", dynamicClient.Actions()[1]) + } + + updatedMonitorObj, err := dynamicClient.Resource(schema.GroupVersionResource{ + Group: "monitoring.coreos.com", + Version: "v1", + Resource: "servicemonitors", + }).Namespace("openshift-kube-apiserver").Get("cluster-kube-apiserver", 
metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("expected to get updated monitor, got: %v", err)
+	}
+
+	labels, _, err := unstructured.NestedStringMap(updatedMonitorObj.UnstructuredContent(), "spec", "selector", "matchLabels")
+	if err != nil {
+		t.Fatalf("unable to get selector: %v", err)
+	}
+
+	expectedKeys := []string{"app", "custom"}
+	resultKeys := []string{}
+	for key := range labels {
+		resultKeys = append(resultKeys, key)
+	}
+	sort.Strings(resultKeys)
+
+	if !reflect.DeepEqual(resultKeys, expectedKeys) {
+		t.Fatalf("expected %#v selectors, got %#v", expectedKeys, resultKeys)
+	}
+
+	requiredMonitorSpec, _, _ := unstructured.NestedSlice(readServiceMonitorFromBytes([]byte(fakeServiceMonitor)).UnstructuredContent(), "spec", "endpoints")
+	existingMonitorSpec, _, _ := unstructured.NestedSlice(updatedMonitorObj.UnstructuredContent(), "spec", "endpoints")
+
+	if !equality.Semantic.DeepEqual(requiredMonitorSpec, existingMonitorSpec) {
+		t.Fatalf("expected resulting service monitor spec endpoints to match required spec: %s", diff.ObjectDiff(requiredMonitorSpec, existingMonitorSpec))
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go
new file mode 100644
index 00000000000..7377cc48eb8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go
@@ -0,0 +1,190 @@
+package resourceapply
+
+import (
+	"fmt"
+
+	"k8s.io/klog"
+
+	rbacv1 "k8s.io/api/rbac/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourcemerge"
+)
+
+// ApplyClusterRole merges objectmeta and requires rules; aggregation rules are not allowed for now.
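+// Aggregated roles are rejected (and any existing aggregationRule is cleared on
+// update) because aggregated ClusterRole rules are owned by the RBAC aggregation
+// controller; stomping rules here would fight that controller.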
+func ApplyClusterRole(client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) {
+	if required.AggregationRule != nil && len(required.AggregationRule.ClusterRoleSelectors) != 0 {
+		return nil, false, fmt.Errorf("cannot create an aggregated cluster role")
+	}
+
+	existing, err := client.ClusterRoles().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.ClusterRoles().Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+	if contentSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Rules = required.Rules
+	existingCopy.AggregationRule = nil
+
+	if klog.V(4) {
+		klog.Infof("ClusterRole %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.ClusterRoles().Update(existingCopy)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyClusterRoleBinding(client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) {
+	existing, err := client.ClusterRoleBindings().Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.ClusterRoleBindings().Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+	requiredCopy := required.DeepCopy()
+
+	// Enforce apiGroup fields in roleRefs and subjects
+	existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range existingCopy.Subjects {
+		if existingCopy.Subjects[i].Kind == "User" {
+			existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range requiredCopy.Subjects {
+		if requiredCopy.Subjects[i].Kind == "User" {
+			requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta)
+
+	subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+	roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+	if subjectsAreSame && roleRefIsSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Subjects = requiredCopy.Subjects
+	existingCopy.RoleRef = requiredCopy.RoleRef
+
+	if klog.V(4) {
+		klog.Infof("ClusterRoleBinding %q changes: %v", requiredCopy.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.ClusterRoleBindings().Update(existingCopy)
+	reportUpdateEvent(recorder, requiredCopy, err)
+	return actual, true, err
+}
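+
+// Illustrative caller sketch (assumption, not part of this file): binding a
+// ClusterRole to an operator service account from a sync loop. The helper
+// normalizes the implicit rbac.authorization.k8s.io apiGroup on the roleRef
+// (and on User subjects) before diffing, so manifests may omit it.
+//
+//	binding := &rbacv1.ClusterRoleBinding{
+//		ObjectMeta: metav1.ObjectMeta{Name: "example-operator"},
+//		RoleRef:    rbacv1.RoleRef{Kind: "ClusterRole", Name: "example-operator"},
+//		Subjects:   []rbacv1.Subject{{Kind: "ServiceAccount", Namespace: "openshift-example", Name: "operator"}},
+//	}
+//	if _, _, err := ApplyClusterRoleBinding(kubeClient.RbacV1(), recorder, binding); err != nil {
+//		return err
+//	}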
+
+// ApplyRole merges objectmeta, requires rules
+func ApplyRole(client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) {
+	existing, err := client.Roles(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.Roles(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+	contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules)
+	if contentSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Rules = required.Rules
+
+	if klog.V(4) {
+		klog.Infof("Role %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, existingCopy))
+	}
+	actual, err := client.Roles(required.Namespace).Update(existingCopy)
+	reportUpdateEvent(recorder, required, err)
+	return actual, true, err
+}
+
+// ApplyRoleBinding merges objectmeta, requires subjects and role refs
+// TODO on non-matching roleref, delete and recreate
+func ApplyRoleBinding(client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) {
+	existing, err := client.RoleBindings(required.Namespace).Get(required.Name, metav1.GetOptions{})
+	if apierrors.IsNotFound(err) {
+		actual, err := client.RoleBindings(required.Namespace).Create(required)
+		reportCreateEvent(recorder, required, err)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+	requiredCopy := required.DeepCopy()
+
+	// Enforce apiGroup fields in roleRefs and subjects
+	existingCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range existingCopy.Subjects {
+		if existingCopy.Subjects[i].Kind == "User" {
+			existingCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	requiredCopy.RoleRef.APIGroup = rbacv1.GroupName
+	for i := range requiredCopy.Subjects {
+		if requiredCopy.Subjects[i].Kind == "User" {
+			requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName
+		}
+	}
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta)
+
+	subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects)
+	roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef)
+
+	if subjectsAreSame && roleRefIsSame && !*modified {
+		return existingCopy, false, nil
+	}
+
+	existingCopy.Subjects = requiredCopy.Subjects
+	existingCopy.RoleRef = requiredCopy.RoleRef
+
+	if klog.V(4) {
+		klog.Infof("RoleBinding %q changes: %v", requiredCopy.Namespace+"/"+requiredCopy.Name, JSONPatchNoError(existing, existingCopy))
+	}
+
+	actual, err := client.RoleBindings(requiredCopy.Namespace).Update(existingCopy)
+	reportUpdateEvent(recorder, requiredCopy, err)
+	return actual, true, err
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go
new file mode 100644
index 00000000000..0c3cd965eff
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go
@@ -0,0 +1,50 @@
+package resourceapply
+
+import (
+	"k8s.io/klog"
+
+	storagev1 "k8s.io/api/storage/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	storageclientv1 "k8s.io/client-go/kubernetes/typed/storage/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events"
"github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyStorageClass merges objectmeta, tries to write everything else +func ApplyStorageClass(client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool, + error) { + existing, err := client.StorageClasses().Get(required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.StorageClasses().Create(required) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy, required) + if contentSame && !*modified { + return existingCopy, false, nil + } + + objectMeta := existingCopy.ObjectMeta.DeepCopy() + existingCopy = required.DeepCopy() + existingCopy.ObjectMeta = *objectMeta + + if klog.V(4) { + klog.Infof("StorageClass %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + // TODO if provisioner, parameters, reclaimpolicy, or volumebindingmode are different, update will fail so delete and recreate + actual, err := client.StorageClasses().Update(existingCopy) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go new file mode 100644 index 00000000000..50458f784dd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/coordinates.go @@ -0,0 +1,16 @@ +package resourcegraph + +type ResourceCoordinates struct { + Group string + Resource string + Namespace string + Name string +} + +func (c ResourceCoordinates) String() string { + resource := c.Resource + if len(c.Group) > 0 { + resource = resource + "." 
+ c.Group + } + return resource + "/" + c.Name + "[" + c.Namespace + "]" +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go new file mode 100644 index 00000000000..a7402b8169b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/interface.go @@ -0,0 +1,62 @@ +package resourcegraph + +import ( + "fmt" + + "github.com/gonum/graph" +) + +func NewResources() Resources { + return &resourcesImpl{} +} + +func NewResource(coordinates ResourceCoordinates) Resource { + return &simpleSource{coordinates: coordinates} +} + +func NewConfigMap(namespace, name string) Resource { + return NewResource(NewCoordinates("", "configmaps", namespace, name)) +} + +func NewSecret(namespace, name string) Resource { + return NewResource(NewCoordinates("", "secrets", namespace, name)) +} + +func NewOperator(name string) Resource { + return NewResource(NewCoordinates("config.openshift.io", "clusteroperators", "", name)) +} + +func NewConfig(resource string) Resource { + return NewResource(NewCoordinates("config.openshift.io", resource, "", "cluster")) +} + +type Resource interface { + Add(resources Resources) Resource + From(Resource) Resource + Note(note string) Resource + + fmt.Stringer + GetNote() string + Coordinates() ResourceCoordinates + Sources() []Resource + Dump(indentDepth int) []string + DumpSources(indentDepth int) []string +} + +type Resources interface { + Add(resource Resource) + Dump() []string + AllResources() []Resource + Resource(coordinates ResourceCoordinates) Resource + Roots() []Resource + NewGraph() graph.Directed +} + +func NewCoordinates(group, resource, namespace, name string) ResourceCoordinates { + return ResourceCoordinates{ + Group: group, + Resource: resource, + Namespace: namespace, + Name: name, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go new file mode 100644 index 00000000000..e3668e76c56 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resource.go @@ -0,0 +1,73 @@ +package resourcegraph + +import ( + "fmt" +) + +type simpleSource struct { + coordinates ResourceCoordinates + note string + nested []Resource + sources []Resource +} + +func (r *simpleSource) Coordinates() ResourceCoordinates { + return r.coordinates +} + +func (s *simpleSource) Add(resources Resources) Resource { + resources.Add(s) + return s +} + +func (s *simpleSource) From(source Resource) Resource { + s.sources = append(s.sources, source) + return s +} + +func (s *simpleSource) Note(note string) Resource { + s.note = note + return s +} + +func (s *simpleSource) String() string { + return fmt.Sprintf("%v%s", s.coordinates, s.note) +} + +func (s *simpleSource) GetNote() string { + return s.note +} + +func (s *simpleSource) Sources() []Resource { + return s.sources +} + +func (r *simpleSource) Dump(indentDepth int) []string { + lines := []string{} + lines = append(lines, indent(indentDepth, r.String())) + + for _, nested := range r.nested { + lines = append(lines, nested.Dump(indentDepth+1)...) 
+ } + + return lines +} + +func (r *simpleSource) DumpSources(indentDepth int) []string { + lines := []string{} + lines = append(lines, indent(indentDepth, r.String())) + + for _, source := range r.sources { + lines = append(lines, source.DumpSources(indentDepth+1)...) + } + + return lines +} + +func indent(depth int, in string) string { + indent := "" + for i := 0; i < depth; i++ { + indent = indent + " " + } + return indent + in +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go new file mode 100644 index 00000000000..482ea9b8d56 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcegraph/resources.go @@ -0,0 +1,126 @@ +package resourcegraph + +import ( + "fmt" + "strings" + + "github.com/gonum/graph" + "github.com/gonum/graph/encoding/dot" + "github.com/gonum/graph/simple" +) + +type resourcesImpl struct { + resources []Resource +} + +func (r *resourcesImpl) Add(resource Resource) { + r.resources = append(r.resources, resource) +} + +func (r *resourcesImpl) Dump() []string { + lines := []string{} + for _, root := range r.Roots() { + lines = append(lines, root.Dump(0)...) + } + return lines +} + +func (r *resourcesImpl) AllResources() []Resource { + ret := []Resource{} + for _, v := range r.resources { + ret = append(ret, v) + } + return ret +} + +func (r *resourcesImpl) Resource(coordinates ResourceCoordinates) Resource { + for _, v := range r.resources { + if v.Coordinates() == coordinates { + return v + } + } + return nil +} + +func (r *resourcesImpl) Roots() []Resource { + ret := []Resource{} + for _, resource := range r.AllResources() { + if len(resource.Sources()) > 0 { + continue + } + ret = append(ret, resource) + } + return ret +} + +type resourceGraphNode struct { + simple.Node + Resource Resource +} + +// DOTAttributes implements an attribute getter for the DOT encoding +func (n resourceGraphNode) DOTAttributes() []dot.Attribute { + color := "white" + switch { + case n.Resource.Coordinates().Resource == "clusteroperators": + color = `"#c8fbcd"` // green + case n.Resource.Coordinates().Resource == "configmaps": + color = `"#bdebfd"` // blue + case n.Resource.Coordinates().Resource == "secrets": + color = `"#fffdb8"` // yellow + case n.Resource.Coordinates().Resource == "pods": + color = `"#ffbfb8"` // red + case n.Resource.Coordinates().Group == "config.openshift.io": + color = `"#c7bfff"` // purple + } + resource := n.Resource.Coordinates().Resource + if len(n.Resource.Coordinates().Group) > 0 { + resource = resource + "." 
+ n.Resource.Coordinates().Group
+	}
+	label := fmt.Sprintf("%s\n%s\n%s\n%s", resource, n.Resource.Coordinates().Name, n.Resource.Coordinates().Namespace, n.Resource.GetNote())
+	return []dot.Attribute{
+		{Key: "label", Value: fmt.Sprintf("%q", label)},
+		{Key: "style", Value: "filled"},
+		{Key: "fillcolor", Value: color},
+	}
+}
+
+func (r *resourcesImpl) NewGraph() graph.Directed {
+	g := simple.NewDirectedGraph(1.0, 0.0)
+
+	coordinatesToNode := map[ResourceCoordinates]graph.Node{}
+	idToCoordinates := map[int]ResourceCoordinates{}
+
+	// make all nodes
+	allResources := r.AllResources()
+	for i := range allResources {
+		resource := allResources[i]
+		id := g.NewNodeID()
+		node := resourceGraphNode{Node: simple.Node(id), Resource: resource}
+
+		coordinatesToNode[resource.Coordinates()] = node
+		idToCoordinates[id] = resource.Coordinates()
+		g.AddNode(node)
+	}
+
+	// make all edges
+	for i := range allResources {
+		resource := allResources[i]
+
+		for _, source := range resource.Sources() {
+			from := coordinatesToNode[source.Coordinates()]
+			to := coordinatesToNode[resource.Coordinates()]
+			g.SetEdge(simple.Edge{F: from, T: to})
+		}
+	}
+
+	return g
+}
+
+// Quote takes an arbitrary DOT ID and escapes any quotes that it contains.
+// The resulting string is quoted again to guarantee that it is a valid ID.
+// DOT graph IDs can be any double-quoted string.
+// See http://www.graphviz.org/doc/info/lang.html
+func Quote(id string) string {
+	return fmt.Sprintf(`"%s"`, strings.Replace(id, `"`, `\"`, -1))
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go
new file mode 100644
index 00000000000..aa8b3ec27db
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehash/as_configmap.go
@@ -0,0 +1,171 @@
+package resourcehash
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"hash/fnv"
+
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/listers/core/v1"
+)
+
+// GetConfigMapHash returns a hash of the configmap data
+func GetConfigMapHash(obj *corev1.ConfigMap) (string, error) {
+	hasher := fnv.New32()
+	if err := json.NewEncoder(hasher).Encode(obj.Data); err != nil {
+		return "", err
+	}
+	return base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil
+}
+
+// GetSecretHash returns a hash of the secret data
+func GetSecretHash(obj *corev1.Secret) (string, error) {
+	hasher := fnv.New32()
+	if err := json.NewEncoder(hasher).Encode(obj.Data); err != nil {
+		return "", err
+	}
+	return base64.URLEncoding.EncodeToString(hasher.Sum(nil)), nil
+}
+
+// MultipleObjectHashStringMap returns a map of key/hash pairs suitable for merging into a configmap
+func MultipleObjectHashStringMap(objs ...runtime.Object) (map[string]string, error) {
+	ret := map[string]string{}
+
+	for _, obj := range objs {
+		switch t := obj.(type) {
+		case *corev1.ConfigMap:
+			hash, err := GetConfigMapHash(t)
+			if err != nil {
+				return nil, err
+			}
+			// this string coercion is lossy, but it
should be fairly controlled and must be an allowed name + ret[mapKeyFor("secret", t.Namespace, t.Name)] = hash + + default: + return nil, fmt.Errorf("%T is not handled", t) + } + } + + return ret, nil +} + +func mapKeyFor(resource, namespace, name string) string { + return fmt.Sprintf("%s.%s.%s", namespace, name, resource) +} + +// ObjectReference can be used to reference a particular resource. Not all group resources are respected by all methods. +type ObjectReference struct { + Resource schema.GroupResource + Namespace string + Name string +} + +// MultipleObjectHashStringMapForObjectReferences returns a map of key/hash pairs suitable for merging into a configmap +func MultipleObjectHashStringMapForObjectReferences(client kubernetes.Interface, objRefs ...*ObjectReference) (map[string]string, error) { + objs := []runtime.Object{} + + for _, objRef := range objRefs { + switch objRef.Resource { + case schema.GroupResource{Resource: "configmap"}, schema.GroupResource{Resource: "configmaps"}: + obj, err := client.CoreV1().ConfigMaps(objRef.Namespace).Get(objRef.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + case schema.GroupResource{Resource: "secret"}, schema.GroupResource{Resource: "secrets"}: + obj, err := client.CoreV1().Secrets(objRef.Namespace).Get(objRef.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + default: + return nil, fmt.Errorf("%v is not handled", objRef.Resource) + } + } + + return MultipleObjectHashStringMap(objs...) +} + +// MultipleObjectHashStringMapForObjectReferenceFromLister is MultipleObjectHashStringMapForObjectReferences using a lister for performance +func MultipleObjectHashStringMapForObjectReferenceFromLister(configmapLister v1.ConfigMapLister, secretLister v1.SecretLister, objRefs ...*ObjectReference) (map[string]string, error) { + objs := []runtime.Object{} + + for _, objRef := range objRefs { + switch objRef.Resource { + case schema.GroupResource{Resource: "configmap"}, schema.GroupResource{Resource: "configmaps"}: + obj, err := configmapLister.ConfigMaps(objRef.Namespace).Get(objRef.Name) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + case schema.GroupResource{Resource: "secret"}, schema.GroupResource{Resource: "secrets"}: + obj, err := secretLister.Secrets(objRef.Namespace).Get(objRef.Name) + if apierrors.IsNotFound(err) { + // don't error, just don't list the key. this is different than empty + continue + } + if err != nil { + return nil, err + } + objs = append(objs, obj) + + default: + return nil, fmt.Errorf("%v is not handled", objRef.Resource) + } + } + + return MultipleObjectHashStringMap(objs...) 
+} + +func NewObjectRef() *ObjectReference { + return &ObjectReference{} +} + +func (r *ObjectReference) ForConfigMap() *ObjectReference { + r.Resource = schema.GroupResource{Resource: "configmaps"} + return r +} + +func (r *ObjectReference) ForSecret() *ObjectReference { + r.Resource = schema.GroupResource{Resource: "secrets"} + return r +} + +func (r *ObjectReference) Named(name string) *ObjectReference { + r.Name = name + return r +} + +func (r *ObjectReference) InNamespace(namespace string) *ObjectReference { + r.Namespace = namespace + return r +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go new file mode 100644 index 00000000000..32e4043f626 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go @@ -0,0 +1,18 @@ +package resourcemerge + +import ( + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" +) + +// EnsureCustomResourceDefinition ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureCustomResourceDefinition(modified *bool, existing *apiextv1beta1.CustomResourceDefinition, required apiextv1beta1.CustomResourceDefinition) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go new file mode 100644 index 00000000000..1731382e688 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go @@ -0,0 +1,80 @@ +package resourcemerge + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorsv1 "github.com/openshift/api/operator/v1" +) + +func GenerationFor(generations []operatorsv1.GenerationStatus, resource schema.GroupResource, namespace, name string) *operatorsv1.GenerationStatus { + for i := range generations { + curr := &generations[i] + if curr.Namespace == namespace && + curr.Name == name && + curr.Group == resource.Group && + curr.Resource == resource.Resource { + + return curr + } + } + + return nil +} + +func SetGeneration(generations *[]operatorsv1.GenerationStatus, newGeneration operatorsv1.GenerationStatus) { + if generations == nil { + generations = &[]operatorsv1.GenerationStatus{} + } + + existingGeneration := GenerationFor(*generations, schema.GroupResource{Group: newGeneration.Group, Resource: newGeneration.Resource}, newGeneration.Namespace, newGeneration.Name) + if existingGeneration == nil { + *generations = append(*generations, newGeneration) + return + } + + existingGeneration.LastGeneration = newGeneration.LastGeneration + existingGeneration.Hash = newGeneration.Hash +} + +func ExpectedDeploymentGeneration(required *appsv1.Deployment, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "deployments"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func SetDeploymentGeneration(generations 
*[]operatorsv1.GenerationStatus, actual *appsv1.Deployment) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: "apps", + Resource: "deployments", + Namespace: actual.Namespace, + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} + +func ExpectedDaemonSetGeneration(required *appsv1.DaemonSet, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "daemonsets"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func SetDaemonSetGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.DaemonSet) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: "apps", + Resource: "daemonsets", + Namespace: actual.Namespace, + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go new file mode 100644 index 00000000000..b28c8770ae3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go @@ -0,0 +1,230 @@ +package resourcemerge + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + + "k8s.io/klog" + "sigs.k8s.io/yaml" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + kyaml "k8s.io/apimachinery/pkg/util/yaml" +) + +// MergeConfigMap takes a configmap, the target key, special overlay funcs, and a list of config YAMLs to overlay on top of each other. +// It returns the resultant configmap and a bool indicating if any changes were made to the configmap. +func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { + return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...) +} + +// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs, and a list of config YAMLs to overlay on top of each other. +// It returns the resultant configmap and a bool indicating if any changes were made to the configmap. +// It roundtrips the config through the given schema. +func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { + configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...) 
+ if err != nil { + return nil, false, err + } + + if reflect.DeepEqual(configMap.Data[configKey], string(configBytes)) { + return configMap, false, nil + } + + ret := configMap.DeepCopy() + if ret.Data == nil { + // guard against a nil data map on the incoming configmap + ret.Data = map[string]string{} + } + ret.Data[configKey] = string(configBytes) + + return ret, true, nil +} + +// MergeProcessConfig merges a series of config yaml files together with each later one overlaying all previous +func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { + currentConfigYAML := configYAMLs[0] + + for _, currConfigYAML := range configYAMLs[1:] { + prevConfigJSON, err := kyaml.ToJSON(currentConfigYAML) + if err != nil { + klog.Warning(err) + // maybe it's just json + prevConfigJSON = currentConfigYAML + } + prevConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(prevConfigJSON)).Decode(&prevConfig); err != nil { + return nil, err + } + + if len(currConfigYAML) > 0 { + currConfigJSON, err := kyaml.ToJSON(currConfigYAML) + if err != nil { + klog.Warning(err) + // maybe it's just json + currConfigJSON = currConfigYAML + } + currConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(currConfigJSON)).Decode(&currConfig); err != nil { + return nil, err + } + + // protect against mismatched typemeta + prevAPIVersion, _, _ := unstructured.NestedString(prevConfig, "apiVersion") + prevKind, _, _ := unstructured.NestedString(prevConfig, "kind") + currAPIVersion, _, _ := unstructured.NestedString(currConfig, "apiVersion") + currKind, _, _ := unstructured.NestedString(currConfig, "kind") + currGVKSet := len(currAPIVersion) > 0 || len(currKind) > 0 + gvkMismatched := currAPIVersion != prevAPIVersion || currKind != prevKind + if currGVKSet && gvkMismatched { + return nil, fmt.Errorf("%v/%v does not equal %v/%v", currAPIVersion, currKind, prevAPIVersion, prevKind) + } + + if err := mergeConfig(prevConfig, currConfig, "", specialCases); err != nil { + return nil, err + } + } + + currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig}) + if err != nil { + return nil, err + } + } + + return currentConfigYAML, nil +} + +// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous. +// The result is roundtripped through the given schema if it is non-nil. +func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { + bs, err := MergeProcessConfig(specialCases, configYAMLs...) + if err != nil { + return nil, err + } + + if schema == nil { + return bs, nil + } + + // roundtrip through the schema + typed := schema.DeepCopyObject() + if err := yaml.Unmarshal(bs, typed); err != nil { + return nil, err + } + typedBytes, err := json.Marshal(typed) + if err != nil { + return nil, err + } + var untypedJSON map[string]interface{} + if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil { + return nil, err + } + + // and intersect output with input because we cannot rely on omitempty in the schema + inputBytes, err := yaml.YAMLToJSON(bs) + if err != nil { + return nil, err + } + var inputJSON map[string]interface{} + if err := json.Unmarshal(inputBytes, &inputJSON); err != nil { + return nil, err + } + return json.Marshal(intersectJSON(inputJSON, untypedJSON)) +} + +type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) + +// mergeConfig overwrites entries in curr by additional. It modifies curr. 
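+// As a rough illustration (editorial sketch, not part of the vendored library), merging +// +//	curr       = {"a": "1", "b": {"x": "old"}} +//	additional = {"b": {"x": "new", "y": "2"}} +// +// leaves curr == {"a": "1", "b": {"x": "new", "y": "2"}}; a MergeFunc registered in +// specialCases under the key ".b.x" would be consulted instead of the plain overwrite. 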
+func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]MergeFunc) error { + for additionalKey, additionalVal := range additional { + fullKey := currentPath + "." + additionalKey + specialCase, ok := specialCases[fullKey] + if ok { + var err error + curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath) + if err != nil { + return err + } + continue + } + + currVal, ok := curr[additionalKey] + if !ok { + curr[additionalKey] = additionalVal + continue + } + + // only some scalars are accepted + switch castVal := additionalVal.(type) { + case map[string]interface{}: + currValAsMap, ok := currVal.(map[string]interface{}) + if !ok { + currValAsMap = map[string]interface{}{} + curr[additionalKey] = currValAsMap + } + + err := mergeConfig(currValAsMap, castVal, fullKey, specialCases) + if err != nil { + return err + } + continue + + default: + if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil { + return err + } + } + + } + + return nil +} + +// intersectJSON returns the intersection of both JSON objects, +// preferring the values of the first argument. +func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := map[string]interface{}{} + for k, v1 := range x1 { + v2, ok := x2[k] + if !ok { + continue + } + ret[k] = intersectValue(v1, v2) + } + return ret +} + +func intersectArray(x1, x2 []interface{}) []interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := make([]interface{}, 0, len(x1)) + for i := range x1 { + if i >= len(x2) { + break + } + ret = append(ret, intersectValue(x1[i], x2[i])) + } + return ret +} + +func intersectValue(x1, x2 interface{}) interface{} { + switch x1 := x1.(type) { + case map[string]interface{}: + x2, ok := x2.(map[string]interface{}) + if !ok { + return x1 + } + return intersectJSON(x1, x2) + case []interface{}: + x2, ok := x2.([]interface{}) + if !ok { + return x1 + } + return intersectArray(x1, x2) + default: + return x1 + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go new file mode 100644 index 00000000000..efaff9d6b60 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go @@ -0,0 +1,273 @@ +package resourcemerge + +import ( + "reflect" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/diff" + + controlplanev1 "github.com/openshift/api/kubecontrolplane/v1" +) + +func TestMergeConfig(t *testing.T) { + tests := []struct { + name string + curr map[string]interface{} + additional map[string]interface{} + specialCases map[string]MergeFunc + + expected map[string]interface{} + expectedErr string + }{ + { + name: "add non-conflicting", + curr: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": "one", + }, + }, + additional: map[string]interface{}{ + "bravo": map[string]interface{}{ + "banana": "two", + "cake": map[string]interface{}{ + "armadillo": "uno", + }, + }, + "charlie": "third", + }, + + expected: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": "one", + "banana": "two", + "cake": map[string]interface{}{ + "armadillo": "uno", + }, + }, + "charlie": "third", + }, + }, + { + name: "add conflicting, replace type", + curr: 
map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": "one", + }, + }, + additional: map[string]interface{}{ + "bravo": map[string]interface{}{ + "apple": map[string]interface{}{ + "armadillo": "uno", + }, + }, + }, + + expected: map[string]interface{}{ + "alpha": "first", + "bravo": map[string]interface{}{ + "apple": map[string]interface{}{ + "armadillo": "uno", + }, + }, + }, + }, + { + name: "nil out", + curr: map[string]interface{}{ + "alpha": "first", + }, + additional: map[string]interface{}{ + "alpha": nil, + }, + + expected: map[string]interface{}{ + "alpha": nil, + }, + }, + { + name: "force empty", + curr: map[string]interface{}{ + "alpha": "first", + }, + additional: map[string]interface{}{ + "alpha": "", + }, + + expected: map[string]interface{}{ + "alpha": "", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := mergeConfig(test.curr, test.additional, "", test.specialCases) + switch { + case err == nil && len(test.expectedErr) == 0: + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err != nil && len(test.expectedErr) != 0 && !strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err) + } + + if !reflect.DeepEqual(test.expected, test.curr) { + t.Error(diff.ObjectDiff(test.expected, test.curr)) + } + }) + } +} + +func TestMergeProcessConfig(t *testing.T) { + tests := []struct { + name string + curr string + additional string + specialCases map[string]MergeFunc + + expected string + expectedErr string + }{ + { + name: "no conflict on missing typemeta", + curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + additional: ` +bravo: two +`, + expected: `{"alpha":"first","apiVersion":"foo","bravo":"two","kind":"the-kind"} +`, + }, + { + curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + name: "no conflict on same typemeta", + additional: ` +apiVersion: foo +kind: the-kind +bravo: two +`, + expected: `{"alpha":"first","apiVersion":"foo","bravo":"two","kind":"the-kind"} +`, + }, + { + name: "conflict on different typemeta 01", + curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + additional: ` +kind: the-other-kind +bravo: two +`, + expectedErr: `/the-other-kind does not equal foo/the-kind`, + }, + { + name: "conflict on different typemeta 03", + curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + additional: ` +apiVersion: bar +bravo: two +`, + expectedErr: `bar/ does not equal foo/the-kind`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := MergeProcessConfig(test.specialCases, []byte(test.curr), []byte(test.additional)) + switch { + case err == nil && len(test.expectedErr) == 0: + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err != nil && len(test.expectedErr) != 0 && !strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err) + } + if err != nil { + return + } + + if test.expected != string(actual) { + t.Error(diff.StringDiff(test.expected, string(actual))) + } + }) + } +} + +func TestMergePrunedConfig(t *testing.T) { + tests := []struct { + name string + curr string + additional string + specialCases map[string]MergeFunc + + expected string + expectedErr string + }{ + { + name: "prune unknown values", + 
curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + additional: ` +consolePublicURL: http://foo/bar +`, + expected: `{"apiVersion":"foo","consolePublicURL":"http://foo/bar","kind":"the-kind"}`, + }, + { + name: "prune unknown values with array", + curr: ` +apiVersion: foo +kind: the-kind +corsAllowedOrigins: +- (?i)//openshift(:|\z) +`, + additional: ` +consolePublicURL: http://foo/bar +`, + expected: `{"apiVersion":"foo","consolePublicURL":"http://foo/bar","corsAllowedOrigins":["(?i)//openshift(:|\\z)"],"kind":"the-kind"}`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := MergePrunedProcessConfig(&controlplanev1.KubeAPIServerConfig{}, test.specialCases, []byte(test.curr), []byte(test.additional)) + switch { + case err == nil && len(test.expectedErr) == 0: + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err != nil && len(test.expectedErr) != 0 && !strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err) + } + if err != nil { + return + } + + if test.expected != string(actual) { + t.Error(diff.StringDiff(test.expected, string(actual))) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go new file mode 100644 index 00000000000..9d03da6e2dc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go @@ -0,0 +1,153 @@ +package resourcemerge + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EnsureObjectMeta writes namespace, name, labels, and annotations. Don't set other things here. +// TODO finalizer support maybe? 
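+// A hypothetical call site (editorial sketch; existing and required are assumed placeholders): +// +//	modified := false +//	EnsureObjectMeta(&modified, &existing.ObjectMeta, required.ObjectMeta) +//	if modified { +//		// persist the updated object via the appropriate client +//	} 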
+func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { + SetStringIfSet(modified, &existing.Namespace, required.Namespace) + SetStringIfSet(modified, &existing.Name, required.Name) + MergeMap(modified, &existing.Labels, required.Labels) + MergeMap(modified, &existing.Annotations, required.Annotations) +} + +func stringPtr(val string) *string { + return &val +} + +func SetString(modified *bool, existing *string, required string) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetStringIfSet(modified *bool, existing *string, required string) { + if len(required) == 0 { + return + } + if required != *existing { + *existing = required + *modified = true + } +} + +func setStringPtr(modified *bool, existing **string, required *string) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetString(modified, *existing, *required) +} + +func SetStringSlice(modified *bool, existing *[]string, required []string) { + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func SetStringSliceIfSet(modified *bool, existing *[]string, required []string) { + if required == nil { + return + } + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func BoolPtr(val bool) *bool { + return &val +} + +func SetBool(modified *bool, existing *bool, required bool) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setBoolPtr(modified *bool, existing **bool, required *bool) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetBool(modified, *existing, *required) +} + +func int64Ptr(val int64) *int64 { + return &val +} + +func SetInt32(modified *bool, existing *int32, required int32) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetInt32IfSet(modified *bool, existing *int32, required int32) { + if required == 0 { + return + } + + SetInt32(modified, existing, required) +} + +func SetInt64(modified *bool, existing *int64, required int64) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setInt64Ptr(modified *bool, existing **int64, required *int64) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetInt64(modified, *existing, *required) +} + +func MergeMap(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + for k, v := range required { + if existingV, ok := (*existing)[k]; !ok || v != existingV { + *modified = true + (*existing)[k] = v + } + } +} + +func SetMapStringString(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + // record the change so callers can react to it + *modified = true + *existing = required + } +} + +func SetMapStringStringIfSet(modified *bool, existing *map[string]string, required map[string]string) { + if required == nil { + return + } + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + // record the change so callers can react to it + *modified = true + *existing = required + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go 
b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go new file mode 100644 index 00000000000..81a11c871ce --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + apiExtensionsScheme = runtime.NewScheme() + apiExtensionsCodecs = serializer.NewCodecFactory(apiExtensionsScheme) +) + +func init() { + if err := apiextensionsv1beta1.AddToScheme(apiExtensionsScheme); err != nil { + panic(err) + } +} + +func ReadCustomResourceDefinitionV1Beta1OrDie(objBytes []byte) *apiextensionsv1beta1.CustomResourceDefinition { + requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextensionsv1beta1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiextensionsv1beta1.CustomResourceDefinition) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go new file mode 100644 index 00000000000..8490017e1c7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go @@ -0,0 +1,34 @@ +package resourceread + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + appsScheme = runtime.NewScheme() + appsCodecs = serializer.NewCodecFactory(appsScheme) +) + +func init() { + if err := appsv1.AddToScheme(appsScheme); err != nil { + panic(err) + } +} + +func ReadDeploymentV1OrDie(objBytes []byte) *appsv1.Deployment { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.Deployment) +} + +func ReadDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.DaemonSet) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go new file mode 100644 index 00000000000..ac2b477585b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go @@ -0,0 +1,70 @@ +package resourceread + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + coreScheme = runtime.NewScheme() + coreCodecs = serializer.NewCodecFactory(coreScheme) +) + +func init() { + if err := corev1.AddToScheme(coreScheme); err != nil { + panic(err) + } +} + +func ReadConfigMapV1OrDie(objBytes []byte) *corev1.ConfigMap { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ConfigMap) +} + +func ReadSecretV1OrDie(objBytes []byte) *corev1.Secret { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Secret) +} + +func ReadNamespaceV1OrDie(objBytes []byte) *corev1.Namespace { + requiredObj, err := 
runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Namespace) +} + +func ReadServiceAccountV1OrDie(objBytes []byte) *corev1.ServiceAccount { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ServiceAccount) +} + +func ReadServiceV1OrDie(objBytes []byte) *corev1.Service { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Service) +} + +func ReadPodV1OrDie(objBytes []byte) *corev1.Pod { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Pod) +} + +func WritePodV1OrDie(obj *corev1.Pod) string { + return runtime.EncodeOrDie(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), obj) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go new file mode 100644 index 00000000000..bf14899d883 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go @@ -0,0 +1,50 @@ +package resourceread + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + rbacScheme = runtime.NewScheme() + rbacCodecs = serializer.NewCodecFactory(rbacScheme) +) + +func init() { + if err := rbacv1.AddToScheme(rbacScheme); err != nil { + panic(err) + } +} + +func ReadClusterRoleBindingV1OrDie(objBytes []byte) *rbacv1.ClusterRoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRoleBinding) +} + +func ReadClusterRoleV1OrDie(objBytes []byte) *rbacv1.ClusterRole { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRole) +} + +func ReadRoleBindingV1OrDie(objBytes []byte) *rbacv1.RoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.RoleBinding) +} + +func ReadRoleV1OrDie(objBytes []byte) *rbacv1.Role { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.Role) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go new file mode 100644 index 00000000000..3a488870eb2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + storageScheme = runtime.NewScheme() + storageCodecs = serializer.NewCodecFactory(storageScheme) +) + +func init() { + if err := storagev1.AddToScheme(storageScheme); err != nil { + panic(err) + } +} + +func ReadStorageClassV1OrDie(objBytes []byte) *storagev1.StorageClass { + 
requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*storagev1.StorageClass) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go new file mode 100644 index 00000000000..bd24a5d85be --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry.go @@ -0,0 +1,59 @@ +package retry + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" +) + +// ignoreConnectionErrors is a wrapper for a condition function that causes a retry on all errors like +// connection refused, EOF, no route to host, etc., and also on all 5xx API server errors. +// This wrapper returns immediately on HTTP 4xx client errors; those are not retried. +func ignoreConnectionErrors(lastError *error, fn ConditionWithContextFunc) ConditionWithContextFunc { + return func(ctx context.Context) (bool, error) { + done, err := fn(ctx) + switch { + case done: + return true, err + case err == nil: + return true, nil + case IsHTTPClientError(err): + return false, err + default: + *lastError = err + return false, nil + } + } +} + +// RetryOnConnectionErrors takes a context and a condition function and retries the condition function until: +// 1) no error is returned +// 2) a client (4xx) HTTP error is returned +// 3) the context passed to the condition function is done +// 4) the number of steps in the exponential backoff has been met +// In case of 3) or 4) the error returned will be the last observed error from the condition function. +func RetryOnConnectionErrors(ctx context.Context, fn ConditionWithContextFunc) error { + var lastRetryErr error + err := ExponentialBackoffWithContext(ctx, retry.DefaultBackoff, ignoreConnectionErrors(&lastRetryErr, fn)) + switch err { + case wait.ErrWaitTimeout: + if lastRetryErr != nil { + return lastRetryErr + } + return err + default: + return err + } +} + +// IsHTTPClientError indicates whether the error passed is a 4xx API server error (client error). 
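+// For illustration (editorial sketch, not part of the vendored library), the two are typically combined +// like this, where client, "ns" and "name" are assumed placeholders: +// +//	err := RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { +//		_, err := client.CoreV1().ConfigMaps("ns").Get("name", metav1.GetOptions{}) +//		return err == nil, err +//	}) +// +// A NotFound (4xx) error is returned immediately, while dial errors and 5xx responses are retried +// with backoff until the context or the backoff steps are exhausted. 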
+func IsHTTPClientError(err error) bool { + switch t := err.(type) { + case errors.APIStatus: + return t.Status().Code >= 400 && t.Status().Code < 500 + default: + return false + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry_test.go new file mode 100644 index 00000000000..a1502e51157 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/retry_test.go @@ -0,0 +1,122 @@ +package retry + +import ( + "context" + "fmt" + "testing" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func TestRetryOnConnectionErrors(t *testing.T) { + tests := []struct { + name string + contextTimeout time.Duration + jobDuration time.Duration + jobError error + jobDone bool + evalError func(*testing.T, error) + evalAttempts func(*testing.T, int) + }{ + { + name: "timeout on context deadline", + contextTimeout: 200 * time.Millisecond, + jobDuration: 500 * time.Millisecond, + jobError: errors.NewInternalError(fmt.Errorf("internal error")), + evalError: func(t *testing.T, e error) { + if !errors.IsInternalError(e) { + t.Errorf("expected internal server error, got %v", e) + } + }, + evalAttempts: func(t *testing.T, attempts int) { + if attempts != 1 { + t.Errorf("expected only one attempt, got %d", attempts) + } + }, + }, + { + name: "retry on internal server error", + contextTimeout: 500 * time.Millisecond, + jobDuration: 200 * time.Millisecond, + jobError: errors.NewInternalError(fmt.Errorf("internal error")), + evalError: func(t *testing.T, e error) { + if !errors.IsInternalError(e) { + t.Errorf("expected internal server error, got %v", e) + } + }, + evalAttempts: func(t *testing.T, attempts int) { + if attempts <= 1 { + t.Errorf("expected more than one attempt, got %d", attempts) + } + }, + }, + { + name: "return on not found error", + contextTimeout: 500 * time.Millisecond, + jobDuration: 100 * time.Millisecond, + jobError: errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "test-pod"), + evalError: func(t *testing.T, e error) { + if !errors.IsNotFound(e) { + t.Errorf("expected not found error, got %v", e) + } + }, + evalAttempts: func(t *testing.T, attempts int) { + if attempts != 1 { + t.Errorf("expected only one attempt, got %d", attempts) + } + }, + }, + { + name: "return on no error", + contextTimeout: 500 * time.Millisecond, + jobDuration: 50 * time.Millisecond, + evalError: func(t *testing.T, e error) { + if e != nil { + t.Errorf("expected no error, got %v", e) + } + }, + evalAttempts: func(t *testing.T, attempts int) { + if attempts != 1 { + t.Errorf("expected only one attempt, got %d", attempts) + } + }, + }, + { + name: "return on done", + contextTimeout: 500 * time.Millisecond, + jobDuration: 50 * time.Millisecond, + jobDone: true, + evalError: func(t *testing.T, e error) { + if e != nil { + t.Errorf("expected no error, got %v", e) + } + }, + evalAttempts: func(t *testing.T, attempts int) { + if attempts != 1 { + t.Errorf("expected only one attempt, got %d", attempts) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.TODO(), test.contextTimeout) + defer cancel() + attempts := 0 + err := RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) { + time.Sleep(test.jobDuration) + attempts++ + if test.jobError != nil { + return test.jobDone, test.jobError + } + return test.jobDone, nil + }) + 
test.evalError(t, err) + test.evalAttempts(t, attempts) + }) + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go new file mode 100644 index 00000000000..49d7e600a3b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/retry/wait.go @@ -0,0 +1,36 @@ +package retry + +import ( + "context" + + "k8s.io/apimachinery/pkg/util/wait" +) + +// TODO: This should be added to k8s.io/client-go/util/retry + +// ConditionWithContextFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. The context passed to the condition function allows the function body +// to return sooner than context.Done(). +type ConditionWithContextFunc func(ctx context.Context) (done bool, err error) + +// ExponentialBackoffWithContext repeats a condition check with exponential backoff and stops repeating +// when the context passed to this function is done. +// +// It checks the condition up to Steps times, increasing the wait by multiplying +// the previous duration by Factor. +// +// If Jitter is greater than zero, a random amount of each duration is added +// (between duration and duration*(1+jitter)). +// +// If the condition never returns true, ErrWaitTimeout is returned. All other +// errors terminate immediately. +func ExponentialBackoffWithContext(ctx context.Context, backoff wait.Backoff, condition ConditionWithContextFunc) error { + return wait.ExponentialBackoff(backoff, func() (bool, error) { + select { + case <-ctx.Done(): + return false, wait.ErrWaitTimeout + default: + return condition(ctx) + } + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go new file mode 100644 index 00000000000..f5a26338b73 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go @@ -0,0 +1,67 @@ +package resourcesynccontroller + +import ( + "crypto/x509" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/cert" + + "github.com/openshift/library-go/pkg/crypto" +) + +func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) { + certificates := []*x509.Certificate{} + for _, input := range inputConfigMaps { + inputConfigMap, err := lister.ConfigMaps(input.Namespace).Get(input.Name) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + return nil, err + } + + // configmaps must conform to this + inputContent := inputConfigMap.Data["ca-bundle.crt"] + if len(inputContent) == 0 { + continue + } + inputCerts, err := cert.ParseCertsPEM([]byte(inputContent)) + if err != nil { + return nil, fmt.Errorf("configmap/%s in %q is malformed: %v", input.Name, input.Namespace, err) + } + certificates = append(certificates, inputCerts...) + } + + certificates = crypto.FilterExpiredCerts(certificates...) + finalCertificates := []*x509.Certificate{} + // now check for duplicates. 
n^2, but super simple + for i := range certificates { + found := false + for j := range finalCertificates { + if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { + found = true + break + } + } + if !found { + finalCertificates = append(finalCertificates, certificates[i]) + } + } + + caBytes, err := crypto.EncodeCertificates(finalCertificates...) + if err != nil { + return nil, err + } + + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: destinationConfigMap.Namespace, Name: destinationConfigMap.Name}, + Data: map[string]string{ + "ca-bundle.crt": string(caBytes), + }, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go new file mode 100644 index 00000000000..344eddd8302 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go @@ -0,0 +1,19 @@ +package resourcesynccontroller + +// ResourceLocation describes coordinates for a resource to be synced +type ResourceLocation struct { + Namespace string `json:"namespace"` + Name string `json:"name"` +} + +var emptyResourceLocation = ResourceLocation{} + +// ResourceSyncer allows changes to syncing rules by this controller +type ResourceSyncer interface { + // SyncConfigMap indicates that a configmap should be copied from the source to the destination. It will also + // mirror a deletion from the source. If the source is a zero object the destination will be deleted. + SyncConfigMap(destination, source ResourceLocation) error + // SyncSecret indicates that a secret should be copied from the source to the destination. It will also + // mirror a deletion from the source. If the source is a zero object the destination will be deleted. + SyncSecret(destination, source ResourceLocation) error +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go new file mode 100644 index 00000000000..6f11b6c9615 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -0,0 +1,328 @@ +package resourcesynccontroller + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const controllerWorkQueueKey = "key" + +// ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. +// It will also mirror deletions by deleting destinations. 
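+// Typical wiring (editorial sketch; variable names are assumed, and every namespace appearing in a +// rule must be covered by the informers handed to the constructor): +// +//	c := NewResourceSyncController(operatorClient, kubeInformersForNamespaces, secretsGetter, configMapsGetter, eventRecorder) +//	_ = c.SyncConfigMap( +//		ResourceLocation{Namespace: "target-ns", Name: "ca-bundle"}, // destination +//		ResourceLocation{Namespace: "source-ns", Name: "ca-bundle"}, // source +//	) +//	go c.Run(ctx, 1) 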
+type ResourceSyncController struct { + // syncRuleLock is used to ensure we avoid races on changes to syncing rules + syncRuleLock sync.RWMutex + // configMapSyncRules is a map from destination location to source location + configMapSyncRules map[ResourceLocation]ResourceLocation + // secretSyncRules is a map from destination location to source location + secretSyncRules map[ResourceLocation]ResourceLocation + + // knownNamespaces is the list of namespaces we are watching. + knownNamespaces sets.String + + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces + operatorConfigClient v1helpers.OperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +var _ ResourceSyncer = &ResourceSyncController{} + +// NewResourceSyncController creates ResourceSyncController. +func NewResourceSyncController( + operatorConfigClient v1helpers.OperatorClient, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + secretsGetter corev1client.SecretsGetter, + configMapsGetter corev1client.ConfigMapsGetter, + eventRecorder events.Recorder, +) *ResourceSyncController { + c := &ResourceSyncController{ + operatorConfigClient: operatorConfigClient, + eventRecorder: eventRecorder.WithComponentSuffix("resource-sync-controller"), + + configMapSyncRules: map[ResourceLocation]ResourceLocation{}, + secretSyncRules: map[ResourceLocation]ResourceLocation{}, + kubeInformersForNamespaces: kubeInformersForNamespaces, + knownNamespaces: kubeInformersForNamespaces.Namespaces(), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ResourceSyncController"), + configMapGetter: configMapsGetter, + secretGetter: secretsGetter, + } + + for namespace := range kubeInformersForNamespaces.Namespaces() { + if len(namespace) == 0 { + continue + } + informers := kubeInformersForNamespaces.InformersFor(namespace) + informers.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) + informers.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, informers.Core().V1().ConfigMaps().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, informers.Core().V1().Secrets().Informer().HasSynced) + } + + // we watch this just in case someone messes with our status + operatorConfigClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorConfigClient.Informer().HasSynced) + + return c +} + +func (c *ResourceSyncController) SyncConfigMap(destination, source ResourceLocation) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.configMapSyncRules[destination] = source + + // make sure the new rule is picked up + c.queue.Add(controllerWorkQueueKey) + return nil +} + +func (c *ResourceSyncController) SyncSecret(destination, source ResourceLocation) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", 
source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.secretSyncRules[destination] = source + + // make sure the new rule is picked up + c.queue.Add(controllerWorkQueueKey) + return nil +} + +func (c *ResourceSyncController) sync() error { + operatorSpec, _, _, err := c.operatorConfigClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + c.syncRuleLock.RLock() + defer c.syncRuleLock.RUnlock() + + errors := []error{} + + for destination, source := range c.configMapSyncRules { + if source == emptyResourceLocation { + // use the cache to check whether the configmap exists in target namespace, if not skip the extra delete call. + if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.configMapGetter.ConfigMaps(destination.Namespace).Delete(destination.Name, nil); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncConfigMap(c.configMapGetter, c.eventRecorder, source.Namespace, source.Name, destination.Namespace, destination.Name, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, err) + } + } + for destination, source := range c.secretSyncRules { + if source == emptyResourceLocation { + // use the cache to check whether the secret exists in target namespace, if not skip the extra delete call. + if _, err := c.secretGetter.Secrets(destination.Namespace).Get(destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.secretGetter.Secrets(destination.Namespace).Delete(destination.Name, nil); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncSecret(c.secretGetter, c.eventRecorder, source.Namespace, source.Name, destination.Namespace, destination.Name, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + cond := operatorv1.OperatorCondition{ + Type: condition.ResourceSyncControllerDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: v1helpers.NewMultiLineAggregate(errors).Error(), + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil + } + + cond := operatorv1.OperatorCondition{ + Type: condition.ResourceSyncControllerDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil +} + +func (c *ResourceSyncController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting ResourceSyncController") + defer klog.Infof("Shutting down ResourceSyncController") + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
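+ // (Editorial note: every sync() pass evaluates the complete rule set and all events funnel into the +// single controllerWorkQueueKey, so additional workers would only contend on that one key; a single +// worker is sufficient by construction.) 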
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *ResourceSyncController) runWorker(_ context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *ResourceSyncController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *ResourceSyncController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + } +} + +func NewDebugHandler(controller *ResourceSyncController) http.Handler { + return &debugHTTPHandler{controller: controller} +} + +type debugHTTPHandler struct { + controller *ResourceSyncController +} + +type ResourceSyncRule struct { + Source ResourceLocation `json:"source"` + Destination ResourceLocation `json:"destination"` +} + +type ResourceSyncRuleList []ResourceSyncRule + +func (l ResourceSyncRuleList) Len() int { return len(l) } +func (l ResourceSyncRuleList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l ResourceSyncRuleList) Less(i, j int) bool { + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) < 0 { + return true + } + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) > 0 { + return false + } + if strings.Compare(l[i].Source.Name, l[j].Source.Name) < 0 { + return true + } + return false +} + +type ControllerSyncRules struct { + Secrets ResourceSyncRuleList `json:"secrets"` + Configs ResourceSyncRuleList `json:"configs"` +} + +// ServeHTTP writes the sync rules of the controller as JSON +func (h *debugHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + syncRules := ControllerSyncRules{ResourceSyncRuleList{}, ResourceSyncRuleList{}} + + h.controller.syncRuleLock.RLock() + defer h.controller.syncRuleLock.RUnlock() + syncRules.Secrets = append(syncRules.Secrets, resourceSyncRuleList(h.controller.secretSyncRules)...) + syncRules.Configs = append(syncRules.Configs, resourceSyncRuleList(h.controller.configMapSyncRules)...) 
+ + data, err := json.Marshal(syncRules) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + w.Write([]byte(err.Error())) + return + } + w.WriteHeader(http.StatusOK) + w.Write(data) +} + +func resourceSyncRuleList(syncRules map[ResourceLocation]ResourceLocation) ResourceSyncRuleList { + rules := make(ResourceSyncRuleList, 0, len(syncRules)) + for src, dest := range syncRules { + rule := ResourceSyncRule{ + Source: src, + Destination: dest, + } + rules = append(rules, rule) + } + sort.Sort(rules) + return rules +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go new file mode 100644 index 00000000000..1981dfb1241 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go @@ -0,0 +1,292 @@ +package resourcesynccontroller + +import ( + "context" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime" + ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" + "github.com/openshift/library-go/pkg/operator/v1helpers" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" +) + +func TestSyncSecret(t *testing.T) { + kubeClient := fake.NewSimpleClientset( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "config", Name: "foo"}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "operator", Name: "to-remove"}, + }, + ) + + destinationSecretCreated := make(chan struct{}) + destinationSecretBarChecked := make(chan struct{}) + destinationSecretEmptySourceChecked := make(chan struct{}) + + kubeClient.PrependReactor("create", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + actual, isCreate := action.(ktesting.CreateAction) + if !isCreate { + return false, nil, nil + } + secret, isSecret := actual.GetObject().(*corev1.Secret) + if !isSecret { + return false, nil, nil + } + if secret.Name == "foo" && secret.Namespace == "operator" { + close(destinationSecretCreated) + } + return false, nil, nil + }) + + deleteSecretCounterMutex := sync.Mutex{} + deleteSecretCounter := 0 + + kubeClient.PrependReactor("delete", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + deleteSecretCounterMutex.Lock() + defer deleteSecretCounterMutex.Unlock() + deleteSecretCounter++ + return false, nil, nil + }) + + kubeClient.PrependReactor("get", "secrets", func(action ktesting.Action) (bool, runtime.Object, error) { + actual, isGet := action.(ktesting.GetAction) + if !isGet { + return false, nil, nil + } + if actual.GetNamespace() == "operator" { + switch actual.GetName() { + case "bar": + close(destinationSecretBarChecked) + case "empty-source": + close(destinationSecretEmptySourceChecked) + } + } + return false, nil, nil + }) + + secretInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("config")) + operatorInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("operator")) + fakeStaticPodOperatorClient := 
v1helpers.NewFakeOperatorClient( + &operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + &operatorv1.OperatorStatus{}, + nil, + ) + eventRecorder := eventstesting.NewTestingEventRecorder(t) + c := NewResourceSyncController( + fakeStaticPodOperatorClient, + v1helpers.NewFakeKubeInformersForNamespaces(map[string]informers.SharedInformerFactory{ + "config": secretInformers, + "operator": operatorInformers, + }), + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + c.cachesToSync = []cache.InformerSynced{ + secretInformers.Core().V1().Secrets().Informer().HasSynced, + } + c.configMapGetter = kubeClient.CoreV1() + c.secretGetter = kubeClient.CoreV1() + + ctx, ctxCancel := context.WithCancel(context.TODO()) + defer ctxCancel() + + go secretInformers.Start(ctx.Done()) + go c.Run(ctx, 1) + + // The source secret was removed (404) but the destination exists. This should increase the "deleteSecretCounter" + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "to-remove"}, ResourceLocation{Namespace: "config", Name: "removed"}); err != nil { + t.Fatal(err) + } + + // The source secret exists, but the destination does not. This should close the "destinationSecretCreated" channel + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "foo"}, ResourceLocation{Namespace: "config", Name: "foo"}); err != nil { + t.Fatal(err) + } + + // Neither the source secret nor the destination secret exists. This should close the "destinationSecretBarChecked" channel and should not increase + // the deleteSecretCounter (we don't issue a Delete() call when Get() returns 404) + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "bar"}, ResourceLocation{Namespace: "config", Name: "bar"}); err != nil { + t.Fatal(err) + } + + // The source resource location is not set and the destination does not exist. This should close the "destinationSecretEmptySourceChecked" channel and + // should not increase the deleteSecretCounter (this is a special case in the resource sync controller). 
+ if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "empty-source"}, ResourceLocation{}); err != nil { + t.Fatal(err) + } + + select { + case <-destinationSecretCreated: + case <-time.After(10 * time.Second): + t.Fatal("timeout while waiting for destination secret to be created") + } + + select { + case <-destinationSecretBarChecked: + case <-time.After(10 * time.Second): + t.Fatal("timeout while waiting for destination secret 'bar' to be checked for existence") + } + + select { + case <-destinationSecretEmptySourceChecked: + case <-time.After(10 * time.Second): + t.Fatal("timeout while waiting for destination secret 'empty-source' to be checked for existence") + } + + deleteSecretCounterMutex.Lock() + defer deleteSecretCounterMutex.Unlock() + if deleteSecretCounter != 1 { + t.Fatalf("expected exactly 1 delete call for this test, got %d", deleteSecretCounter) + } +} + +func TestSyncConfigMap(t *testing.T) { + kubeClient := fake.NewSimpleClientset( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "other", Name: "foo"}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "other", Name: "foo"}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "config", Name: "bar"}, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "config-managed", Name: "pear"}, + }, + ) + + configInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("config")) + configManagedInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("config-managed")) + operatorInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("operator")) + + fakeStaticPodOperatorClient := v1helpers.NewFakeOperatorClient( + &operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + &operatorv1.OperatorStatus{}, + nil, + ) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + kubeInformersForNamespaces := v1helpers.NewFakeKubeInformersForNamespaces(map[string]informers.SharedInformerFactory{"other": configInformers}) + + c := NewResourceSyncController( + fakeStaticPodOperatorClient, + v1helpers.NewFakeKubeInformersForNamespaces(map[string]informers.SharedInformerFactory{ + "config": configInformers, + "config-managed": configManagedInformers, + "operator": operatorInformers, + }), + v1helpers.CachedSecretGetter(kubeClient.CoreV1(), kubeInformersForNamespaces), + v1helpers.CachedConfigMapGetter(kubeClient.CoreV1(), kubeInformersForNamespaces), + eventRecorder, + ) + c.configMapGetter = kubeClient.CoreV1() + c.secretGetter = kubeClient.CoreV1() + + // sync ones for namespaces we don't have + if err := c.SyncSecret(ResourceLocation{Namespace: "other", Name: "foo"}, ResourceLocation{Namespace: "operator", Name: "foo"}); err == nil || err.Error() != `not watching namespace "other"` { + t.Error(err) + } + if err := c.SyncSecret(ResourceLocation{Namespace: "config", Name: "foo"}, ResourceLocation{Namespace: "other", Name: "foo"}); err == nil || err.Error() != `not watching namespace "other"` { + t.Error(err) + } + if err := c.SyncConfigMap(ResourceLocation{Namespace: "other", Name: "foo"}, ResourceLocation{Namespace: "operator", Name: "foo"}); err == nil || err.Error() != `not watching namespace "other"` { + t.Error(err) + } + if err := c.SyncConfigMap(ResourceLocation{Namespace: "config", Name: "foo"}, ResourceLocation{Namespace: 
"other", Name: "foo"}); err == nil || err.Error() != `not watching namespace "other"` { + t.Error(err) + } + + // register + kubeClient.ClearActions() + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "foo"}, ResourceLocation{Namespace: "config", Name: "bar"}); err != nil { + t.Fatal(err) + } + if err := c.SyncConfigMap(ResourceLocation{Namespace: "operator", Name: "apple"}, ResourceLocation{Namespace: "config-managed", Name: "pear"}); err != nil { + t.Fatal(err) + } + if err := c.sync(); err != nil { + t.Fatal(err) + } + if _, err := kubeClient.CoreV1().Secrets("operator").Get("foo", metav1.GetOptions{}); err != nil { + t.Error(err) + } + if _, err := kubeClient.CoreV1().ConfigMaps("operator").Get("apple", metav1.GetOptions{}); err != nil { + t.Error(err) + } + + // clear + kubeClient.ClearActions() + if err := c.SyncSecret(ResourceLocation{Namespace: "operator", Name: "foo"}, ResourceLocation{}); err != nil { + t.Fatal(err) + } + if err := c.SyncConfigMap(ResourceLocation{Namespace: "operator", Name: "apple"}, ResourceLocation{}); err != nil { + t.Fatal(err) + } + if err := c.sync(); err != nil { + t.Fatal(err) + } + if _, err := kubeClient.CoreV1().Secrets("operator").Get("foo", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Error(err) + } + if _, err := kubeClient.CoreV1().ConfigMaps("operator").Get("apple", metav1.GetOptions{}); !apierrors.IsNotFound(err) { + t.Error(err) + } +} + +func TestServeHTTP(t *testing.T) { + c := &ResourceSyncController{ + secretSyncRules: map[ResourceLocation]ResourceLocation{ + {Namespace: "foo", Name: "cat"}: {Namespace: "bar", Name: "cat"}, + {Namespace: "test", Name: "dog"}: {Namespace: "othertest", Name: "dog"}, + {Namespace: "foo", Name: "dog"}: {Namespace: "bar", Name: "dog"}, + }, + configMapSyncRules: map[ResourceLocation]ResourceLocation{ + {Namespace: "a", Name: "b"}: {Namespace: "foo", Name: "bar"}, + {Namespace: "a", Name: "c"}: {Namespace: "foo", Name: "barc"}, + {Namespace: "bar", Name: "b"}: {Namespace: "foo", Name: "baz"}, + }, + } + + expected := `{"secrets":[` + + `{"source":{"namespace":"foo","name":"cat"},"destination":{"namespace":"bar","name":"cat"}},` + + `{"source":{"namespace":"foo","name":"dog"},"destination":{"namespace":"bar","name":"dog"}},` + + `{"source":{"namespace":"test","name":"dog"},"destination":{"namespace":"othertest","name":"dog"}}],` + + `"configs":[` + + `{"source":{"namespace":"a","name":"b"},"destination":{"namespace":"foo","name":"bar"}},` + + `{"source":{"namespace":"a","name":"c"},"destination":{"namespace":"foo","name":"barc"}},` + + `{"source":{"namespace":"bar","name":"b"},"destination":{"namespace":"foo","name":"baz"}}]}` + + handler := NewDebugHandler(c) + writer := httptest.NewRecorder() + handler.ServeHTTP(writer, &http.Request{}) + if writer.Body == nil { + t.Fatal("expected a body") + } + response := writer.Body.String() + if response != expected { + t.Errorf("Expected:%+v\n Got: %+v\n", expected, response) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/revision_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/revision_controller.go new file mode 100644 index 00000000000..ee6307f4fbf --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/revision_controller.go @@ -0,0 +1,380 @@ +package revisioncontroller + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + 
"k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const revisionControllerWorkQueueKey = "key" + +// LatestRevisionClient is an operator client for an operator status with a latest revision field. +type LatestRevisionClient interface { + v1helpers.OperatorClient + + // GetLatestRevisionState returns the spec, status and latest revision. + GetLatestRevisionState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, rev int32, rv string, err error) + // UpdateLatestRevisionOperatorStatus updates the status with the given latestAvailableRevision and the by applying the given updateFuncs. + UpdateLatestRevisionOperatorStatus(latestAvailableRevision int32, updateFuncs ...v1helpers.UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) +} + +// RevisionController is a controller that watches a set of configmaps and secrets and them against a revision snapshot +// of them. If the original resources changes, the revision counter is increased, stored in LatestAvailableRevision +// field of the operator config and new snapshots suffixed by the revision are created. +type RevisionController struct { + targetNamespace string + // configMaps is the list of configmaps that are directly copied.A different actor/controller modifies these. + // the first element should be the configmap that contains the static pod manifest + configMaps []RevisionResource + // secrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these. + secrets []RevisionResource + + operatorClient LatestRevisionClient + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +type RevisionResource struct { + Name string + Optional bool +} + +// NewRevisionController create a new revision controller. 
+func NewRevisionController( + targetNamespace string, + configMaps []RevisionResource, + secrets []RevisionResource, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + operatorClient LatestRevisionClient, + configMapGetter corev1client.ConfigMapsGetter, + secretGetter corev1client.SecretsGetter, + eventRecorder events.Recorder, +) *RevisionController { + c := &RevisionController{ + targetNamespace: targetNamespace, + configMaps: configMaps, + secrets: secrets, + + operatorClient: operatorClient, + configMapGetter: configMapGetter, + secretGetter: secretGetter, + eventRecorder: eventRecorder.WithComponentSuffix("revision-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RevisionController"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().ConfigMaps().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Secrets().Informer().HasSynced) + + return c +} + +// createRevisionIfNeeded takes care of creating content for the static pods to use. +// It returns whether to requeue and any error that occurred while updating status. Normally it updates the status itself. +func (c RevisionController) createRevisionIfNeeded(latestAvailableRevision int32, resourceVersion string) (bool, error) { + isLatestRevisionCurrent, reason := c.isLatestRevisionCurrent(latestAvailableRevision) + + // check to make sure that the latestRevision has the exact content we expect;
nothing is mutated here, so we only start creating the next revision when it is required + if isLatestRevisionCurrent { + return false, nil + } + + nextRevision := latestAvailableRevision + 1 + c.eventRecorder.Eventf("RevisionTriggered", "new revision %d triggered by %q", nextRevision, reason) + if err := c.createNewRevision(nextRevision); err != nil { + cond := operatorv1.OperatorCondition{ + Type: "RevisionControllerDegraded", + Status: operatorv1.ConditionTrue, + Reason: "ContentCreationError", + Message: err.Error(), + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + c.eventRecorder.Warningf("RevisionCreateFailed", "Failed to create revision %d: %v", nextRevision, err.Error()) + return true, updateError + } + return true, nil + } + + cond := operatorv1.OperatorCondition{ + Type: "RevisionControllerDegraded", + Status: operatorv1.ConditionFalse, + } + if _, updated, updateError := c.operatorClient.UpdateLatestRevisionOperatorStatus(nextRevision, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return true, updateError + } else if updated { + c.eventRecorder.Eventf("RevisionCreate", "Revision %d created because %s", nextRevision, reason) + } + + return false, nil +} + +func nameFor(name string, revision int32) string { + return fmt.Sprintf("%s-%d", name, revision) +} + +// isLatestRevisionCurrent returns whether the latest revision is up to date and an optional reason +func (c RevisionController) isLatestRevisionCurrent(revision int32) (bool, string) { + configChanges := []string{} + for _, cm := range c.configMaps { + requiredData := map[string]string{} + existingData := map[string]string{} + + required, err := c.configMapGetter.ConfigMaps(c.targetNamespace).Get(cm.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !cm.Optional { + return false, err.Error() + } + existing, err := c.configMapGetter.ConfigMaps(c.targetNamespace).Get(nameFor(cm.Name, revision), metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !cm.Optional { + return false, err.Error() + } + if required != nil { + requiredData = required.Data + } + if existing != nil { + existingData = existing.Data + } + if !equality.Semantic.DeepEqual(existingData, requiredData) { + if klog.V(4) { + klog.Infof("configmap %q changes for revision %d: %s", cm.Name, revision, resourceapply.JSONPatchNoError(existing, required)) + } + configChanges = append(configChanges, fmt.Sprintf("configmap/%s has changed", cm.Name)) + } + } + + secretChanges := []string{} + for _, s := range c.secrets { + requiredData := map[string][]byte{} + existingData := map[string][]byte{} + + required, err := c.secretGetter.Secrets(c.targetNamespace).Get(s.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !s.Optional { + return false, err.Error() + } + existing, err := c.secretGetter.Secrets(c.targetNamespace).Get(nameFor(s.Name, revision), metav1.GetOptions{}) + if apierrors.IsNotFound(err) && !s.Optional { + return false, err.Error() + } + if required != nil { + requiredData = required.Data + } + if existing != nil { + existingData = existing.Data + } + if !equality.Semantic.DeepEqual(existingData, requiredData) { + if klog.V(4) { + klog.Infof("Secret %q changes for revision %d: %s", s.Name, revision, resourceapply.JSONPatchSecretNoError(existing, required)) + } + secretChanges = append(secretChanges, fmt.Sprintf("secret/%s has changed", s.Name)) + } + } + + if len(secretChanges) > 0 || len(configChanges) > 0 { + return false,
strings.Join(append(secretChanges, configChanges...), ",") + } + + return true, "" +} + +func (c RevisionController) createNewRevision(revision int32) error { + statusConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: c.targetNamespace, + Name: nameFor("revision-status", revision), + }, + Data: map[string]string{ + "status": "InProgress", + "revision": fmt.Sprintf("%d", revision), + }, + } + statusConfigMap, _, err := resourceapply.ApplyConfigMap(c.configMapGetter, c.eventRecorder, statusConfigMap) + if err != nil { + return err + } + ownerRefs := []metav1.OwnerReference{{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: statusConfigMap.Name, + UID: statusConfigMap.UID, + }} + + for _, cm := range c.configMaps { + obj, _, err := resourceapply.SyncConfigMap(c.configMapGetter, c.eventRecorder, c.targetNamespace, cm.Name, c.targetNamespace, nameFor(cm.Name, revision), ownerRefs) + if err != nil { + return err + } + if obj == nil && !cm.Optional { + return apierrors.NewNotFound(corev1.Resource("configmaps"), cm.Name) + } + } + for _, s := range c.secrets { + obj, _, err := resourceapply.SyncSecret(c.secretGetter, c.eventRecorder, c.targetNamespace, s.Name, c.targetNamespace, nameFor(s.Name, revision), ownerRefs) + if err != nil { + return err + } + if obj == nil && !s.Optional { + return apierrors.NewNotFound(corev1.Resource("secrets"), s.Name) + } + } + + return nil +} + +// getLatestAvailableRevision returns the latest revision known to the operator. +// This is either the LatestAvailableRevision in the status or the highest revision found in the revision-status configmaps. +func (c RevisionController) getLatestAvailableRevision(operatorStatus *operatorv1.OperatorStatus) (int32, error) { + configMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return 0, err + } + var latestRevision int32 + for _, configMap := range configMaps.Items { + if !strings.HasPrefix(configMap.Name, "revision-status-") { + continue + } + if revision, ok := configMap.Data["revision"]; ok { + revisionNumber, err := strconv.Atoi(revision) + if err != nil { + return 0, err + } + if int32(revisionNumber) > latestRevision { + latestRevision = int32(revisionNumber) + } + } + } + // If there are no configmaps, then this should actually be revision 0 + return latestRevision, nil +} + +func (c RevisionController) sync() error { + operatorSpec, originalOperatorStatus, latestAvailableRevision, resourceVersion, err := c.operatorClient.GetLatestRevisionState() + if err != nil { + return err + } + operatorStatus := originalOperatorStatus.DeepCopy() + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + // If the operator status has 0 as its latest available revision, either this really is the first revision, + // or the operator resource was deleted and the field was reset back to 0; in that case the revision-status configmaps are authoritative, so check them + if latestAvailableRevision == 0 { + // Check whether the current revision is accurate and, if not, search the configmaps for the latest revision + latestRevision, err := c.getLatestAvailableRevision(operatorStatus) + if err != nil { + return err + } + if latestRevision != 0 { + // Then make sure that revision number is what's in the operator status + _, _, err = c.operatorClient.UpdateLatestRevisionOperatorStatus(latestRevision) + // If we made a change, return and requeue with the correct status + return fmt.Errorf("synthetic requeue request (err: %v)", err) + } + } + + requeue, syncErr :=
c.createRevisionIfNeeded(latestAvailableRevision, resourceVersion) + if requeue && syncErr == nil { + return fmt.Errorf("synthetic requeue request (err: %v)", syncErr) + } + err = syncErr + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: condition.RevisionControllerDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if err != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +// Run starts the RevisionController and blocks until the context is cancelled. +func (c *RevisionController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting RevisionController") + defer klog.Infof("Shutting down RevisionController") + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *RevisionController) runWorker(ctx context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *RevisionController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *RevisionController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(revisionControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(revisionControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(revisionControllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/revision_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/revision_controller_test.go new file mode 100644 index 00000000000..62d87cd0bda --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/revision_controller_test.go @@ -0,0 +1,471 @@ +package revisioncontroller + +import ( + "strings" + "testing" + "time" + + "github.com/openshift/library-go/pkg/operator/v1helpers" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + clienttesting "k8s.io/client-go/testing" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" +) + +func filterCreateActions(actions []clienttesting.Action) []runtime.Object { + var createdObjects []runtime.Object + for _, a := range actions { + createAction, isCreate := a.(clienttesting.CreateAction) + if !isCreate { + continue + } + _, isEvent := createAction.GetObject().(*v1.Event) + if isEvent { + continue + } + createdObjects = append(createdObjects, createAction.GetObject()) + } + return createdObjects +} + +const targetNamespace = "copy-resources" + +func TestRevisionController(t *testing.T) { + tests := []struct { + testName string + targetNamespace string + testSecrets
[]RevisionResource + testConfigs []RevisionResource + startingObjects []runtime.Object + staticPodOperatorClient v1helpers.StaticPodOperatorClient + validateActions func(t *testing.T, actions []clienttesting.Action) + validateStatus func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) + expectSyncError string + }{ + { + testName: "set-latest-revision-by-configmap", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 0, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + testConfigs: []RevisionResource{{Name: "test-config"}}, + testSecrets: []RevisionResource{{Name: "test-secret"}}, + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status", Namespace: targetNamespace}}, + &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: targetNamespace}, + Data: map[string]string{"revision": "1"}, + }, + &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: targetNamespace}, + Data: map[string]string{"revision": "2"}, + }, + }, + validateStatus: func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) { + if status.LatestAvailableRevision != 2 { + t.Errorf("expected status LatestAvailableRevision to be 2, got %v", status.LatestAvailableRevision) + } + }, + }, + { + testName: "operator-unmanaged", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Unmanaged, + }, + }, + &operatorv1.StaticPodOperatorStatus{}, + nil, + nil, + ), + validateActions: func(t *testing.T, actions []clienttesting.Action) { + createdObjects := filterCreateActions(actions) + if createdObjectCount := len(createdObjects); createdObjectCount != 0 { + t.Errorf("expected no objects to be created, got %d", createdObjectCount) + } + }, + }, + { + testName: "missing-source-resources", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + testConfigs: []RevisionResource{{Name: "test-config"}}, + testSecrets: []RevisionResource{{Name: "test-secret"}}, + expectSyncError: "synthetic requeue request", + validateStatus: func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) { + if status.Conditions[0].Type != "RevisionControllerDegraded" { + t.Errorf("expected status condition to be 'RevisionControllerDegraded', got %v", status.Conditions[0].Type) + } + if status.Conditions[0].Reason != "ContentCreationError" { + t.Errorf("expected status condition reason to be 'ContentCreationError', got %v", status.Conditions[0].Reason) + } + if
!strings.Contains(status.Conditions[0].Message, `configmaps "test-config" not found`) { + t.Errorf("expected status to be 'configmaps test-config not found', got: %s", status.Conditions[0].Message) + } + }, + }, + { + testName: "copy-resources", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 0, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status", Namespace: targetNamespace}}, + }, + testConfigs: []RevisionResource{{Name: "test-config"}}, + testSecrets: []RevisionResource{{Name: "test-secret"}}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + createdObjects := filterCreateActions(actions) + if createdObjectCount := len(createdObjects); createdObjectCount != 3 { + t.Errorf("expected 3 objects to be created, got %d: %+v", createdObjectCount, createdObjects) + return + } + revisionStatus, hasStatus := createdObjects[0].(*v1.ConfigMap) + if !hasStatus { + t.Errorf("expected config to be created") + return + } + if revisionStatus.Name != "revision-status-1" { + t.Errorf("expected config to have name 'revision-status-1', got %q", revisionStatus.Name) + } + config, hasConfig := createdObjects[1].(*v1.ConfigMap) + if !hasConfig { + t.Errorf("expected config to be created") + return + } + if config.Name != "test-config-1" { + t.Errorf("expected config to have name 'test-config-1', got %q", config.Name) + } + if len(config.OwnerReferences) != 1 { + t.Errorf("expected config to have ownerreferences set, got %+v", config.OwnerReferences) + } + secret, hasSecret := createdObjects[2].(*v1.Secret) + if !hasSecret { + t.Errorf("expected secret to be created") + return + } + if secret.Name != "test-secret-1" { + t.Errorf("expected secret to have name 'test-secret-1', got %q", secret.Name) + } + if len(secret.OwnerReferences) != 1 { + t.Errorf("expected secret to have ownerreferences set, got %+v", secret.OwnerReferences) + } + }, + }, + { + testName: "copy-resources-opt", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 0, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret-opt", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config-opt", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status", Namespace: targetNamespace}}, + }, + 
testConfigs: []RevisionResource{{Name: "test-config"}, {Name: "test-config-opt", Optional: true}}, + testSecrets: []RevisionResource{{Name: "test-secret"}, {Name: "test-secret-opt", Optional: true}}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + createdObjects := filterCreateActions(actions) + if createdObjectCount := len(createdObjects); createdObjectCount != 5 { + t.Errorf("expected 5 objects to be created, got %d: %+v", createdObjectCount, createdObjects) + return + } + revisionStatus, hasStatus := createdObjects[0].(*v1.ConfigMap) + if !hasStatus { + t.Errorf("expected config to be created") + return + } + if revisionStatus.Name != "revision-status-1" { + t.Errorf("expected config to have name 'revision-status-1', got %q", revisionStatus.Name) + } + config, hasConfig := createdObjects[1].(*v1.ConfigMap) + if !hasConfig { + t.Errorf("expected config to be created") + return + } + if config.Name != "test-config-1" { + t.Errorf("expected config to have name 'test-config-1', got %q", config.Name) + } + config, hasConfig = createdObjects[2].(*v1.ConfigMap) + if !hasConfig { + t.Errorf("expected config to be created") + return + } + if config.Name != "test-config-opt-1" { + t.Errorf("expected config to have name 'test-config-opt-1', got %q", config.Name) + } + secret, hasSecret := createdObjects[3].(*v1.Secret) + if !hasSecret { + t.Errorf("expected secret to be created") + return + } + if secret.Name != "test-secret-1" { + t.Errorf("expected secret to have name 'test-secret-1', got %q", secret.Name) + } + secret, hasSecret = createdObjects[4].(*v1.Secret) + if !hasSecret { + t.Errorf("expected secret to be created") + return + } + if secret.Name != "test-secret-opt-1" { + t.Errorf("expected secret to have name 'test-secret-opt-1', got %q", secret.Name) + } + }, + }, + { + testName: "copy-resources-opt-missing", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 0, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status", Namespace: targetNamespace}}, + }, + testConfigs: []RevisionResource{{Name: "test-config"}, {Name: "test-config-opt", Optional: true}}, + testSecrets: []RevisionResource{{Name: "test-secret"}, {Name: "test-secret-opt", Optional: true}}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + createdObjects := filterCreateActions(actions) + if createdObjectCount := len(createdObjects); createdObjectCount != 3 { + t.Errorf("expected 3 objects to be created, got %d: %+v", createdObjectCount, createdObjects) + return + } + revisionStatus, hasStatus := createdObjects[0].(*v1.ConfigMap) + if !hasStatus { + t.Errorf("expected config to be created") + return + } + if revisionStatus.Name != "revision-status-1" { + t.Errorf("expected config to have name 'revision-status-1', got %q", revisionStatus.Name) + } + config, hasConfig := createdObjects[1].(*v1.ConfigMap) + if !hasConfig { + t.Errorf("expected config to be 
created") + return + } + if config.Name != "test-config-1" { + t.Errorf("expected config to have name 'test-config-1', got %q", config.Name) + } + secret, hasSecret := createdObjects[2].(*v1.Secret) + if !hasSecret { + t.Errorf("expected secret to be created") + return + } + if secret.Name != "test-secret-1" { + t.Errorf("expected secret to have name 'test-secret-1', got %q", secret.Name) + } + }, + }, + { + testName: "latest-revision-current", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret-1", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config-1", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: targetNamespace}}, + }, + testConfigs: []RevisionResource{{Name: "test-config"}}, + testSecrets: []RevisionResource{{Name: "test-secret"}}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + createdObjects := filterCreateActions(actions) + if createdObjectCount := len(createdObjects); createdObjectCount != 0 { + t.Errorf("expected no objects to be created, got %d", createdObjectCount) + } + }, + }, + { + testName: "latest-revision-current-optionals-missing", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret-1", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config-1", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: targetNamespace}}, + }, + testConfigs: []RevisionResource{{Name: "test-config"}, {Name: "test-config-opt", Optional: true}}, + testSecrets: []RevisionResource{{Name: "test-secret"}, {Name: "test-secret-opt", Optional: true}}, + validateActions: func(t *testing.T, actions []clienttesting.Action) { + createdObjects := filterCreateActions(actions) + if createdObjectCount := len(createdObjects); createdObjectCount != 0 { + t.Errorf("expected no objects to be created, got %d", createdObjectCount) + } + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(tc.startingObjects...) 
+ eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + + c := NewRevisionController( + tc.targetNamespace, + tc.testConfigs, + tc.testSecrets, + informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace(tc.targetNamespace)), + StaticPodLatestRevisionClient{StaticPodOperatorClient: tc.staticPodOperatorClient}, + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + syncErr := c.sync() + if tc.validateStatus != nil { + _, status, _, _ := tc.staticPodOperatorClient.GetStaticPodOperatorState() + tc.validateStatus(t, status) + } + if tc.validateActions != nil { + tc.validateActions(t, kubeClient.Actions()) + } + if syncErr != nil { + if len(tc.expectSyncError) == 0 { + t.Errorf("unexpected sync error: %v", syncErr) + } else if !strings.Contains(syncErr.Error(), tc.expectSyncError) { + t.Errorf("expected %q string in error %q", tc.expectSyncError, syncErr.Error()) + } + return + } + if len(tc.expectSyncError) != 0 { + t.Errorf("expected %v error, got none", tc.expectSyncError) + return + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/staticpod.go b/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/staticpod.go new file mode 100644 index 00000000000..5ed83bfe233 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/revisioncontroller/staticpod.go @@ -0,0 +1,47 @@ +package revisioncontroller + +import ( + "fmt" + + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// StaticPodLatestRevisionClient is a LatestRevisionClient implementation for StaticPodOperatorStatus. +type StaticPodLatestRevisionClient struct { + v1helpers.StaticPodOperatorClient +} + +var _ LatestRevisionClient = StaticPodLatestRevisionClient{} + +func (c StaticPodLatestRevisionClient) GetLatestRevisionState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, int32, string, error) { + spec, status, rv, err := c.GetStaticPodOperatorState() + if err != nil { + return nil, nil, 0, "", err + } + return &spec.OperatorSpec, &status.OperatorStatus, status.LatestAvailableRevision, rv, nil +} + +func (c StaticPodLatestRevisionClient) UpdateLatestRevisionOperatorStatus(latestAvailableRevision int32, updateFuncs ...v1helpers.UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) { + staticPodUpdateFuncs := make([]v1helpers.UpdateStaticPodStatusFunc, 0, len(updateFuncs)) + for _, f := range updateFuncs { + f := f // capture the loop variable; otherwise every closure below would share the last value of f + staticPodUpdateFuncs = append(staticPodUpdateFuncs, func(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + return f(&operatorStatus.OperatorStatus) + }) + } + status, changed, err := v1helpers.UpdateStaticPodStatus(c, append(staticPodUpdateFuncs, func(status *operatorv1.StaticPodOperatorStatus) error { + if status.LatestAvailableRevision == latestAvailableRevision { + klog.Warningf("revision %d is unexpectedly already the latest available revision. This is a possible race!", latestAvailableRevision) + return fmt.Errorf("conflicting latestAvailableRevision %d", status.LatestAvailableRevision) + } + status.LatestAvailableRevision = latestAvailableRevision + return nil + })...)
+ if err != nil { + return nil, false, err + } + return &status.OperatorStatus, changed, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go b/vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go new file mode 100644 index 00000000000..f9b94d82763 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go @@ -0,0 +1,116 @@ +package staleconditions + +import ( + "fmt" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const workQueueKey = "key" + +type RemoveStaleConditions struct { + conditions []string + + operatorClient v1helpers.OperatorClient + cachesToSync []cache.InformerSynced + + eventRecorder events.Recorder + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +func NewRemoveStaleConditions( + conditions []string, + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, +) *RemoveStaleConditions { + c := &RemoveStaleConditions{ + conditions: conditions, + + operatorClient: operatorClient, + eventRecorder: eventRecorder, + cachesToSync: []cache.InformerSynced{operatorClient.Informer().HasSynced}, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RemoveStaleConditions"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + return c +} + +func (c RemoveStaleConditions) sync() error { + removeStaleConditionsFn := func(status *operatorv1.OperatorStatus) error { + for _, condition := range c.conditions { + v1helpers.RemoveOperatorCondition(&status.Conditions, condition) + } + return nil + } + + if _, _, err := v1helpers.UpdateStatus(c.operatorClient, removeStaleConditionsFn); err != nil { + return err + } + + return nil +} + +// Run starts RemoveStaleConditions and blocks until stopCh is closed. +func (c *RemoveStaleConditions) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting RemoveStaleConditions") + defer klog.Infof("Shutting down RemoveStaleConditions") + + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // doesn't matter what workers say, only start one.
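+ // (The queue only ever holds the single constant key, so extra workers would contend for the same item; a single worker also keeps status updates serialized.)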
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *RemoveStaleConditions) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *RemoveStaleConditions) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *RemoveStaleConditions) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go new file mode 100644 index 00000000000..a7d5f7fb8e8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go @@ -0,0 +1,134 @@ +package certsyncpod + +import ( + "io/ioutil" + "os" + "time" + + "github.com/spf13/cobra" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" +) + +type CertSyncControllerOptions struct { + KubeConfigFile string + Namespace string + DestinationDir string + + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + + kubeClient kubernetes.Interface + tlsServerNameOverride string +} + +func NewCertSyncControllerCommand(configmaps, secrets []revision.RevisionResource) *cobra.Command { + o := &CertSyncControllerOptions{ + configMaps: configmaps, + secrets: secrets, + } + + cmd := &cobra.Command{ + Use: "cert-syncer --kubeconfig=kubeconfigfile", + Run: func(cmd *cobra.Command, args []string) { + if err := o.Complete(); err != nil { + klog.Fatal(err) + } + if err := o.Run(); err != nil { + klog.Fatal(err) + } + }, + } + + cmd.Flags().StringVar(&o.DestinationDir, "destination-dir", o.DestinationDir, "Directory to write to") + cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "Namespace to read from (defaults to the 'POD_NAMESPACE' environment variable)") + cmd.Flags().StringVar(&o.KubeConfigFile, "kubeconfig", o.KubeConfigFile, "Location of the master configuration file to run from.") + cmd.Flags().StringVar(&o.tlsServerNameOverride, "tls-server-name-override", o.tlsServerNameOverride, "Server name override used by TLS to negotiate the serving cert via SNI.") + + return cmd +} + +func (o *CertSyncControllerOptions) Run() error { + // When the kubeconfig content changes, terminate the process so it can be restarted with the new content.
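+ // (Context, as an assumption about the deployment: the syncer runs as a container in a static pod, so closing stopCh below lets Run return and the process exit, and the kubelet restarts it with the fresh kubeconfig. The fileobserver polls the file at the 500ms interval configured below.)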
+ observer, err := fileobserver.NewObserver(500 * time.Millisecond) + if err != nil { + return err + } + + stopCh := make(chan struct{}) + + initialContent, _ := ioutil.ReadFile(o.KubeConfigFile) + observer.AddReactor(fileobserver.TerminateOnChangeReactor(func() { + close(stopCh) + }), map[string][]byte{o.KubeConfigFile: initialContent}, o.KubeConfigFile) + + kubeInformers := informers.NewSharedInformerFactoryWithOptions(o.kubeClient, 10*time.Minute, informers.WithNamespace(o.Namespace)) + + eventRecorder := events.NewKubeRecorder(o.kubeClient.CoreV1().Events(o.Namespace), "cert-syncer", + &corev1.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: os.Getenv("POD_NAMESPACE"), + Name: os.Getenv("POD_NAME"), + }) + + controller, err := NewCertSyncController( + o.DestinationDir, + o.Namespace, + o.configMaps, + o.secrets, + o.kubeClient, + kubeInformers, + eventRecorder, + ) + if err != nil { + return err + } + + // start everything. Informers start after they have been requested. + go controller.Run(1, stopCh) + go observer.Run(stopCh) + go kubeInformers.Start(stopCh) + + <-stopCh + klog.Infof("Shutting down certificate syncer") + + return nil +} + +func (o *CertSyncControllerOptions) Complete() error { + kubeConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfigFile, nil) + if err != nil { + return err + } + + if len(o.Namespace) == 0 && len(os.Getenv("POD_NAMESPACE")) > 0 { + o.Namespace = os.Getenv("POD_NAMESPACE") + } + + protoKubeConfig := rest.CopyConfig(kubeConfig) + protoKubeConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoKubeConfig.ContentType = "application/vnd.kubernetes.protobuf" + + if len(o.tlsServerNameOverride) > 0 { + protoKubeConfig.TLSClientConfig.ServerName = o.tlsServerNameOverride + } + + // This kube client uses protobuf; do not use it for custom resources + kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) + if err != nil { + return err + } + o.kubeClient = kubeClient + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go new file mode 100644 index 00000000000..1670b2c560a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go @@ -0,0 +1,340 @@ +package certsyncpod + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1interface "k8s.io/client-go/kubernetes/typed/core/v1" + v1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" +) + +type CertSyncController struct { + destinationDir string + namespace string + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + + configmapGetter corev1interface.ConfigMapInterface + configMapLister v1.ConfigMapLister + secretGetter corev1interface.SecretInterface + secretLister v1.SecretLister + eventRecorder events.Recorder + + // queue only ever has one item, but it has nice error
handling backoff/retry semantics + queue workqueue.RateLimitingInterface + preRunCaches []cache.InformerSynced +} + +func NewCertSyncController(targetDir, targetNamespace string, configmaps, secrets []revision.RevisionResource, kubeClient kubernetes.Interface, informers informers.SharedInformerFactory, eventRecorder events.Recorder) (*CertSyncController, error) { + c := &CertSyncController{ + destinationDir: targetDir, + namespace: targetNamespace, + configMaps: configmaps, + secrets: secrets, + eventRecorder: eventRecorder.WithComponentSuffix("cert-sync-controller"), + + configmapGetter: kubeClient.CoreV1().ConfigMaps(targetNamespace), + configMapLister: informers.Core().V1().ConfigMaps().Lister(), + secretLister: informers.Core().V1().Secrets().Lister(), + secretGetter: kubeClient.CoreV1().Secrets(targetNamespace), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CertSyncController"), + preRunCaches: []cache.InformerSynced{ + informers.Core().V1().ConfigMaps().Informer().HasSynced, + informers.Core().V1().Secrets().Informer().HasSynced, + }, + } + + informers.Core().V1().ConfigMaps().Informer().AddEventHandler(c.eventHandler()) + informers.Core().V1().Secrets().Informer().AddEventHandler(c.eventHandler()) + + return c, nil +} + +func getConfigMapDir(targetDir, configMapName string) string { + return filepath.Join(targetDir, "configmaps", configMapName) +} + +func getSecretDir(targetDir, secretName string) string { + return filepath.Join(targetDir, "secrets", secretName) +} + +func (c *CertSyncController) sync() error { + errors := []error{} + + klog.Infof("Syncing configmaps: %v", c.configMaps) + for _, cm := range c.configMaps { + configMap, err := c.configMapLister.ConfigMaps(c.namespace).Get(cm.Name) + switch { + case apierrors.IsNotFound(err) && !cm.Optional: + errors = append(errors, err) + continue + + case apierrors.IsNotFound(err) && cm.Optional: + // Check with a live call whether it is really missing + configMap, err = c.configmapGetter.Get(cm.Name, metav1.GetOptions{}) + if err == nil { + klog.Infof("Caches are stale.
They don't see configmap '%s/%s', yet it is present", configMap.Namespace, configMap.Name) + // We will get re-queued when we observe the change + continue + } + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + continue + } + + // remove missing content + if err := os.RemoveAll(getConfigMapDir(c.destinationDir, cm.Name)); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed removing file for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) + errors = append(errors, err) + } + c.eventRecorder.Eventf("CertificateRemoved", "Removed file for configmap: %s/%s", configMap.Namespace, configMap.Name) + continue + + case err != nil: + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) + errors = append(errors, err) + continue + } + + contentDir := getConfigMapDir(c.destinationDir, cm.Name) + + data := map[string]string{} + for filename := range configMap.Data { + fullFilename := filepath.Join(contentDir, filename) + + existingContent, err := ioutil.ReadFile(fullFilename) + if err != nil { + if !os.IsNotExist(err) { + klog.Error(err) + } + continue + } + + data[filename] = string(existingContent) + } + + // Check if cached configmap differs + if reflect.DeepEqual(configMap.Data, data) { + continue + } + + klog.V(2).Infof("Syncing updated configmap '%s/%s'.", configMap.Namespace, configMap.Name) + + // We need to do a live get here so we don't overwrite a newer file with one from a stale cache + configMap, err = c.configmapGetter.Get(configMap.Name, metav1.GetOptions{}) + if err != nil { + // Even if the error is NotFound, we will act on it when the caches catch up + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) + errors = append(errors, err) + continue + } + + // Check if the live configmap differs + if reflect.DeepEqual(configMap.Data, data) { + klog.Infof("Caches are stale.
The live configmap '%s/%s' is reflected on filesystem, but cached one differs", configMap.Namespace, configMap.Name) + continue + } + + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed creating directory for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) + errors = append(errors, err) + continue + } + for filename, content := range configMap.Data { + fullFilename := filepath.Join(contentDir, filename) + // if the existing content is the same, do nothing + if reflect.DeepEqual(data[filename], content) { + continue + } + + klog.Infof("Writing configmap manifest %q ...", fullFilename) + if err := ioutil.WriteFile(fullFilename, []byte(content), 0644); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed writing file for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) + errors = append(errors, err) + continue + } + } + c.eventRecorder.Eventf("CertificateUpdated", "Wrote updated configmap: %s/%s", configMap.Namespace, configMap.Name) + } + + klog.Infof("Syncing secrets: %v", c.secrets) + for _, s := range c.secrets { + secret, err := c.secretLister.Secrets(c.namespace).Get(s.Name) + switch { + case apierrors.IsNotFound(err) && !s.Optional: + errors = append(errors, err) + continue + + case apierrors.IsNotFound(err) && s.Optional: + // Check with a live call whether it is really missing + secret, err = c.secretGetter.Get(s.Name, metav1.GetOptions{}) + if err == nil { + klog.Infof("Caches are stale. They don't see secret '%s/%s', yet it is present", secret.Namespace, secret.Name) + // We will get re-queued when we observe the change + continue + } + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + continue + } + + // check if the secret file exists, skip firing events if it does not + secretFile := getSecretDir(c.destinationDir, s.Name) + if _, err := os.Stat(secretFile); os.IsNotExist(err) { + continue + } + + // remove missing content + if err := os.RemoveAll(secretFile); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed removing file for missing secret: %s/%s: %v", secret.Namespace, secret.Name, err) + errors = append(errors, err) + continue + } + c.eventRecorder.Warningf("CertificateRemoved", "Removed file for missing secret: %s/%s", secret.Namespace, secret.Name) + continue + + case err != nil: + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting secret: %s/%s: %v", secret.Namespace, secret.Name, err) + errors = append(errors, err) + continue + } + + contentDir := getSecretDir(c.destinationDir, s.Name) + + data := map[string][]byte{} + for filename := range secret.Data { + fullFilename := filepath.Join(contentDir, filename) + + existingContent, err := ioutil.ReadFile(fullFilename) + if err != nil { + if !os.IsNotExist(err) { + klog.Error(err) + } + continue + } + + data[filename] = existingContent + } + + // Check if cached secret differs + if reflect.DeepEqual(secret.Data, data) { + continue + } + + klog.V(2).Infof("Syncing updated secret '%s/%s'.", secret.Namespace, secret.Name) + + // We need to do a live get here so we don't overwrite a newer file with one from a stale cache + secret, err = c.secretGetter.Get(secret.Name, metav1.GetOptions{}) + if err != nil { + // Even if the error is NotFound, we will act on it when the caches catch up + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting secret: %s/%s: %v", secret.Namespace,
secret.Name, err) + errors = append(errors, err) + continue + } + + // Check if the live secret differs + if reflect.DeepEqual(secret.Data, data) { + klog.Infof("Caches are stale. The live secret '%s/%s' is reflected on filesystem, but cached one differs", secret.Namespace, secret.Name) + continue + } + + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed creating directory for secret: %s/%s: %v", secret.Namespace, secret.Name, err) + errors = append(errors, err) + continue + } + for filename, content := range secret.Data { + // TODO fix permissions + fullFilename := filepath.Join(contentDir, filename) + // if the existing content is the same, do nothing + if reflect.DeepEqual(data[filename], content) { + continue + } + + klog.Infof("Writing secret manifest %q ...", fullFilename) + if err := ioutil.WriteFile(fullFilename, content, 0644); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed writing file for secret: %s/%s: %v", secret.Namespace, secret.Name, err) + errors = append(errors, err) + continue + } + } + c.eventRecorder.Eventf("CertificateUpdated", "Wrote updated secret: %s/%s", secret.Namespace, secret.Name) + } + + return utilerrors.NewAggregate(errors) +} + +// Run starts the certificate syncer and blocks until stopCh is closed. +func (c *CertSyncController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting CertSyncer") + defer klog.Infof("Shutting down CertSyncer") + + if !cache.WaitForCacheSync(stopCh, c.preRunCaches...) { + klog.Error("failed waiting for caches") + return + } + klog.V(2).Infof("CertSyncer caches synced") + + // doesn't matter what workers say, only start one.
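+ // (wait.Until re-invokes runWorker one second after it returns, and runWorker only returns when the queue is shutting down, so this is effectively a single long-lived worker.)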
+ go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *CertSyncController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *CertSyncController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +const workQueueKey = "key" + +// eventHandler queues the operator to check spec and status +func (c *CertSyncController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go new file mode 100644 index 00000000000..960372df8eb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go @@ -0,0 +1,182 @@ +package backingresource + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "k8s.io/klog" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corelisterv1 "k8s.io/client-go/listers/core/v1" + rbaclisterv1 "k8s.io/client-go/listers/rbac/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/backingresource" +) + +// BackingResourceController is a controller that watches the operator config and updates +// service accounts and RBAC rules in the target namespace according to the bindata manifests +// (templated with the config) if they differ. +type BackingResourceController struct { + targetNamespace string + + operatorClient v1helpers.OperatorClient + saLister corelisterv1.ServiceAccountLister + clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister + kubeClient kubernetes.Interface + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewBackingResourceController creates a new backing resource controller.
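+// A minimal usage sketch (illustrative assumption; operatorClient, the informer factory, kubeClient and recorder come from the embedding operator): +// +// brc := NewBackingResourceController("target-ns", operatorClient, kubeInformersForTargetNamespace, kubeClient, recorder) +// go brc.Run(ctx, 1)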
+func NewBackingResourceController( + targetNamespace string, + operatorClient v1helpers.OperatorClient, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + kubeClient kubernetes.Interface, + eventRecorder events.Recorder, +) *BackingResourceController { + c := &BackingResourceController{ + targetNamespace: targetNamespace, + operatorClient: operatorClient, + + saLister: kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Lister(), + clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), + kubeClient: kubeClient, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "BackingResourceController"), + eventRecorder: eventRecorder.WithComponentSuffix("backing-resource-controller"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().AddEventHandler(c.eventHandler()) + kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().HasSynced) + + return c +} + +func (c BackingResourceController) mustTemplateAsset(name string) ([]byte, error) { + config := struct { + TargetNamespace string + }{ + TargetNamespace: c.targetNamespace, + } + return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join(manifestDir, name)), config).Data, nil +} + +func (c BackingResourceController) sync() error { + operatorSpec, _, _, err := c.operatorClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + directResourceResults := resourceapply.ApplyDirectly(c.kubeClient, c.eventRecorder, c.mustTemplateAsset, + "manifests/installer-sa.yaml", + "manifests/installer-cluster-rolebinding.yaml", + ) + + errs := []error{} + for _, currResult := range directResourceResults { + if currResult.Error != nil { + errs = append(errs, fmt.Errorf("%q (%T): %v", currResult.File, currResult.Type, currResult.Error)) + } + } + err = v1helpers.NewMultiLineAggregate(errs) + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: condition.BackingResourceControllerDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if err != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *BackingResourceController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting BackingResourceController") + defer klog.Infof("Shutting down BackingResourceController") + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
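The mustTemplateAsset helper above runs the embedded manifest through library-go's asset templating with the controller's config struct as template data. The observable effect is the same as feeding the YAML through text/template directly; a standalone sketch (the namespace value is an assumption for illustration):

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

const saManifest = `apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: {{ .TargetNamespace }}
  name: installer-sa
`

func main() {
	// mirrors the anonymous config struct built by mustTemplateAsset
	config := struct{ TargetNamespace string }{TargetNamespace: "openshift-kube-apiserver"} // assumed value

	tmpl := template.Must(template.New("installer-sa.yaml").Parse(saManifest))
	out := &bytes.Buffer{}
	if err := tmpl.Execute(out, config); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // the namespace placeholder is now filled in
}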
+	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
+
+	<-ctx.Done()
+}
+
+func (c *BackingResourceController) runWorker(ctx context.Context) {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *BackingResourceController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// eventHandler queues the operator to check spec and status
+func (c *BackingResourceController) eventHandler() cache.ResourceEventHandler {
+	return cache.ResourceEventHandlerFuncs{
+		AddFunc:    func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) },
+		UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) },
+		DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) },
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go
new file mode 100644
index 00000000000..faf25470e66
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go
@@ -0,0 +1,203 @@
+package backingresource
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+
+	"k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+	clienttesting "k8s.io/client-go/testing"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+func filterCreateActions(actions []clienttesting.Action) []runtime.Object {
+	var createdObjects []runtime.Object
+	for _, a := range actions {
+		createAction, isCreate := a.(clienttesting.CreateAction)
+		if !isCreate {
+			continue
+		}
+		createdObjects = append(createdObjects, createAction.GetObject())
+	}
+	return createdObjects
+}
+
+type prependReactorSpec struct {
+	verb, resource string
+	reaction       clienttesting.ReactionFunc
+}
+
+func TestBackingResourceController(t *testing.T) {
+	tests := []struct {
+		targetNamespace string
+		prependReactors []prependReactorSpec
+		startingObjects []runtime.Object
+		operatorClient  v1helpers.OperatorClient
+		validateActions func(t *testing.T, actions []clienttesting.Action)
+		validateStatus  func(t *testing.T, status *operatorv1.OperatorStatus)
+		expectSyncError string
+	}{
+		{
+			targetNamespace: "successful-create",
+			operatorClient: v1helpers.NewFakeOperatorClient(
+				&operatorv1.OperatorSpec{
+					ManagementState: operatorv1.Managed,
+				},
+				&operatorv1.OperatorStatus{},
+				nil,
+			),
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				createdObjects := filterCreateActions(actions)
+				if createdObjectCount := len(createdObjects); createdObjectCount != 2 {
+					t.Errorf("expected 2 objects to be created, got %d", createdObjectCount)
+					return
+				}
+				sa, hasServiceAccount := createdObjects[0].(*v1.ServiceAccount)
+				if !hasServiceAccount {
+					t.Errorf("expected service account to be created first, got %+v", createdObjects[0])
+					return
+				}
+				if sa.Namespace != "successful-create" {
+					t.Errorf("expected the service account to have 'successful-create' namespace, got %q", sa.Namespace)
+					return
+				}
+				if sa.Name != "installer-sa" {
+					t.Errorf("expected service account to have name 'installer-sa', got %q", sa.Name)
+				}
+
+				crb, hasClusterRoleBinding := createdObjects[1].(*rbacv1.ClusterRoleBinding)
+				if !hasClusterRoleBinding {
+					t.Errorf("expected cluster role binding as second object, got %+v", createdObjects[1])
+				}
+				if rbNamespace := crb.Subjects[0].Namespace; rbNamespace != "successful-create" {
+					t.Errorf("expected the cluster role binding's first subject to have 'successful-create' namespace, got %q", rbNamespace)
+					return
+				}
+				if crb.Name != "system:openshift:operator:successful-create-installer" {
+					t.Errorf("expected cluster role binding name to be 'system:openshift:operator:successful-create-installer', got %q", crb.Name)
+				}
+			},
+		},
+		{
+			targetNamespace: "operator-unmanaged",
+			operatorClient: v1helpers.NewFakeOperatorClient(
+				&operatorv1.OperatorSpec{
+					ManagementState: operatorv1.Unmanaged,
+				},
+				&operatorv1.OperatorStatus{},
+				nil,
+			),
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				createdObjects := filterCreateActions(actions)
+				if createdObjectCount := len(createdObjects); createdObjectCount != 0 {
+					t.Errorf("expected no objects to be created, got %d", createdObjectCount)
+				}
+			},
+		},
+		{
+			targetNamespace: "service-account-exists",
+			startingObjects: []runtime.Object{
+				&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "installer-sa", Namespace: "service-account-exists"}},
+			},
+			operatorClient: v1helpers.NewFakeOperatorClient(
+				&operatorv1.OperatorSpec{
+					ManagementState: operatorv1.Managed,
+				},
+				&operatorv1.OperatorStatus{},
+				nil,
+			),
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				createdObjects := filterCreateActions(actions)
+				if createdObjectCount := len(createdObjects); createdObjectCount != 1 {
+					t.Errorf("expected only one object to be created, got %d", createdObjectCount)
+				}
+				crb, hasClusterRoleBinding := createdObjects[0].(*rbacv1.ClusterRoleBinding)
+				if !hasClusterRoleBinding {
+					t.Errorf("expected cluster role binding as the only created object, got %+v", createdObjects[0])
+				}
+				if rbNamespace := crb.Subjects[0].Namespace; rbNamespace != "service-account-exists" {
+					t.Errorf("expected the cluster role binding's first subject to have 'service-account-exists' namespace, got %q", rbNamespace)
+					return
+				}
+				if crb.Name != "system:openshift:operator:service-account-exists-installer" {
+					t.Errorf("expected cluster role binding name to be 'system:openshift:operator:service-account-exists-installer', got %q", crb.Name)
+				}
+			},
+		},
+		{
+			targetNamespace: "resource-apply-failed",
+			prependReactors: []prependReactorSpec{
+				{
+					verb:     "*",
+					resource: "serviceaccounts",
+					reaction: func(clienttesting.Action) (bool, runtime.Object, error) {
+						return true, nil, fmt.Errorf("test error")
+					},
+				},
+			},
+			operatorClient: v1helpers.NewFakeOperatorClient(
+				&operatorv1.OperatorSpec{
+					ManagementState: operatorv1.Managed,
+				},
+				&operatorv1.OperatorStatus{},
+				nil,
+			),
+			expectSyncError: `test error`,
+			validateStatus: func(t *testing.T, status *operatorv1.OperatorStatus) {
+				if status.Conditions[0].Type != condition.BackingResourceControllerDegradedConditionType {
+					t.Errorf("expected status condition to be failing, got %v", status.Conditions[0].Type)
+				}
+				if status.Conditions[0].Reason != "Error" {
+					t.Errorf("expected status condition reason to be 'Error', got %v", status.Conditions[0].Reason)
+				}
+				if !strings.Contains(status.Conditions[0].Message, "test error") {
+					t.Errorf("expected status condition message to contain 'test error', got: %s", status.Conditions[0].Message)
+				}
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.targetNamespace, func(t *testing.T) {
+			kubeClient := fake.NewSimpleClientset(tc.startingObjects...)
+			for _, r := range tc.prependReactors {
+				kubeClient.PrependReactor(r.verb, r.resource, r.reaction)
+			}
+			eventRecorder := events.NewInMemoryRecorder("")
+			c := NewBackingResourceController(
+				tc.targetNamespace,
+				tc.operatorClient,
+				informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace(tc.targetNamespace)),
+				kubeClient,
+				eventRecorder,
+			)
+			syncErr := c.sync()
+			if tc.validateStatus != nil {
+				_, status, _, _ := tc.operatorClient.GetOperatorState()
+				tc.validateStatus(t, status)
+			}
+			if syncErr != nil {
+				if !strings.Contains(syncErr.Error(), tc.expectSyncError) {
+					t.Errorf("expected %q string in error %q", tc.expectSyncError, syncErr.Error())
+				}
+				return
+			}
+			if syncErr == nil && len(tc.expectSyncError) != 0 {
+				t.Errorf("expected %v error, got none", tc.expectSyncError)
+				return
+			}
+			tc.validateActions(t, kubeClient.Actions())
+		})
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go
new file mode 100644
index 00000000000..7aad2ff00ed
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata/bindata.go
@@ -0,0 +1,258 @@
+// Code generated by go-bindata.
+// sources:
+// pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml
+// pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml
+// DO NOT EDIT!
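go-bindata compiles the two YAML manifests below into the package, so the controller binary carries no runtime file dependencies; callers address assets by their repo-relative source path. A usage sketch (hypothetical main package, assuming this bindata package is importable):

package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/bindata"
)

func main() {
	// MustAsset panics on an unknown path; Asset returns an error instead.
	data := bindata.MustAsset("pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml")
	fmt.Print(string(data)) // raw, still-templated YAML
}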
+
+package bindata
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+	return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:openshift:operator:{{ .TargetNamespace }}-installer
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  namespace: {{ .TargetNamespace }}
+  name: installer-sa
+`)
+
+func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYamlBytes() ([]byte, error) {
+	return _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml, nil
+}
+
+func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml() (*asset, error) {
+	bytes, err := pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+var _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml = []byte(`apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: {{ .TargetNamespace }}
+  name: installer-sa
+`)
+
+func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYamlBytes() ([]byte, error) {
+	return _pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml, nil
+}
+
+func pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml() (*asset, error) {
+	bytes, err := pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
+		}
+		return a.bytes, nil
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+	a, err := Asset(name)
+	if err != nil {
+		panic("asset: Asset(" + name + "): " + err.Error())
+	}
+
+	return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
+		}
+		return a.info, nil
+	}
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+	"pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml": pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml,
+	"pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml": pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+//     data/
+//       foo.txt
+//       img/
+//         a.png
+//         b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(cannonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for childName := range node.Children {
+		rv = append(rv, childName)
+	}
+	return rv, nil
+}
+
+type bintree struct {
+	Func     func() (*asset, error)
+	Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+	"pkg": {nil, map[string]*bintree{
+		"operator": {nil, map[string]*bintree{
+			"staticpod": {nil, map[string]*bintree{
+				"controller": {nil, map[string]*bintree{
+					"backingresource": {nil, map[string]*bintree{
+						"manifests": {nil, map[string]*bintree{
+							"installer-cluster-rolebinding.yaml": {pkgOperatorStaticpodControllerBackingresourceManifestsInstallerClusterRolebindingYaml, map[string]*bintree{}},
+							"installer-sa.yaml": {pkgOperatorStaticpodControllerBackingresourceManifestsInstallerSaYaml, map[string]*bintree{}},
+						}},
+					}},
+				}},
+			}},
+		}},
+	}},
+}}
+
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+	data, err := Asset(name)
+	if err != nil {
+		return err
+	}
+	info, err := AssetInfo(name)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+	if err != nil {
+		return err
+	}
+	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+	children, err := AssetDir(name)
+	// File
+	if err != nil {
+		return RestoreAsset(dir, name)
+	}
+	// Dir
+	for _, child := range children {
+		err = RestoreAssets(dir, filepath.Join(name, child))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _filePath(dir, name string) string {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml
new file mode 100644
index 00000000000..ed055ada4b6
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-cluster-rolebinding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: system:openshift:operator:{{ .TargetNamespace }}-installer
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cluster-admin
+subjects:
+- kind: ServiceAccount
+  namespace: {{ .TargetNamespace }}
+  name: installer-sa
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml
new file mode 100644
index 00000000000..d389483b240
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/manifests/installer-sa.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: {{ .TargetNamespace }}
+  name: installer-sa
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go
new file mode 100644
index 00000000000..eb39499b498
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata/bindata.go
@@ -0,0 +1,263 @@
+// Code generated by go-bindata.
+// sources:
+// pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml
+// DO NOT EDIT!
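This second bindata file embeds the installer pod template, whose `# Value set by operator` fields are filled in by ensureInstallerPod further down in this diff. A sketch of that round trip, reading the asset back into a typed pod object (the namespace and names are assumptions):

package main

import (
	"fmt"

	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
	"github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata"
)

func main() {
	pod := resourceread.ReadPodV1OrDie(bindata.MustAsset("pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml"))

	// the operator fills in the placeholders, mirroring ensureInstallerPod
	pod.Namespace = "openshift-kube-apiserver" // assumed target namespace
	pod.Name = "installer-7-master-0"          // getInstallerPodName(7, "master-0")
	pod.Spec.NodeName = "master-0"

	fmt.Println(pod.Name, pod.Spec.Containers[0].Name) // installer-7-master-0 installer
}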
+
+package bindata
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+func (fi bindataFileInfo) IsDir() bool {
+	return false
+}
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+var _pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml = []byte(`apiVersion: v1
+kind: Pod
+metadata:
+  namespace: # Value set by operator
+  name: # Value set by operator
+  labels:
+    app: installer
+spec:
+  serviceAccountName: installer-sa
+  nodeName: # Value set by operator
+  containers:
+  - name: installer
+    command: # Value set by operator
+    args: # Value set by operator
+    image: # Value set by operator
+    imagePullPolicy: IfNotPresent
+    securityContext:
+      privileged: true
+      runAsUser: 0
+    env:
+    - name: POD_NAME
+      valueFrom:
+        fieldRef:
+          fieldPath: metadata.name
+    terminationMessagePolicy: FallbackToLogsOnError
+    volumeMounts:
+    - mountPath: /etc/kubernetes/
+      name: kubelet-dir
+    resources:
+      requests:
+        memory: 100M
+      limits:
+        memory: 100M
+  restartPolicy: Never
+  priorityClassName: system-node-critical
+  tolerations:
+  - operator: "Exists"
+  securityContext:
+    runAsUser: 0
+  volumes:
+  - hostPath:
+      path: /etc/kubernetes/
+    name: kubelet-dir`)
+
+func pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYamlBytes() ([]byte, error) {
+	return _pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml, nil
+}
+
+func pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml() (*asset, error) {
+	bytes, err := pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+// Asset loads and returns the asset for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
+		}
+		return a.bytes, nil
+	}
+	return nil, fmt.Errorf("Asset %s not found", name)
+}
+
+// MustAsset is like Asset but panics when Asset would return an error.
+// It simplifies safe initialization of global variables.
+func MustAsset(name string) []byte {
+	a, err := Asset(name)
+	if err != nil {
+		panic("asset: Asset(" + name + "): " + err.Error())
+	}
+
+	return a
+}
+
+// AssetInfo loads and returns the asset info for the given name.
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func AssetInfo(name string) (os.FileInfo, error) {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	if f, ok := _bindata[cannonicalName]; ok {
+		a, err := f()
+		if err != nil {
+			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
+		}
+		return a.info, nil
+	}
+	return nil, fmt.Errorf("AssetInfo %s not found", name)
+}
+
+// AssetNames returns the names of the assets.
+func AssetNames() []string {
+	names := make([]string, 0, len(_bindata))
+	for name := range _bindata {
+		names = append(names, name)
+	}
+	return names
+}
+
+// _bindata is a table, holding each asset generator, mapped to its name.
+var _bindata = map[string]func() (*asset, error){
+	"pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml": pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml,
+}
+
+// AssetDir returns the file names below a certain
+// directory embedded in the file by go-bindata.
+// For example if you run go-bindata on data/... and data contains the
+// following hierarchy:
+//     data/
+//       foo.txt
+//       img/
+//         a.png
+//         b.png
+// then AssetDir("data") would return []string{"foo.txt", "img"}
+// AssetDir("data/img") would return []string{"a.png", "b.png"}
+// AssetDir("foo.txt") and AssetDir("notexist") would return an error
+// AssetDir("") will return []string{"data"}.
+func AssetDir(name string) ([]string, error) {
+	node := _bintree
+	if len(name) != 0 {
+		cannonicalName := strings.Replace(name, "\\", "/", -1)
+		pathList := strings.Split(cannonicalName, "/")
+		for _, p := range pathList {
+			node = node.Children[p]
+			if node == nil {
+				return nil, fmt.Errorf("Asset %s not found", name)
+			}
+		}
+	}
+	if node.Func != nil {
+		return nil, fmt.Errorf("Asset %s not found", name)
+	}
+	rv := make([]string, 0, len(node.Children))
+	for childName := range node.Children {
+		rv = append(rv, childName)
+	}
+	return rv, nil
+}
+
+type bintree struct {
+	Func     func() (*asset, error)
+	Children map[string]*bintree
+}
+
+var _bintree = &bintree{nil, map[string]*bintree{
+	"pkg": {nil, map[string]*bintree{
+		"operator": {nil, map[string]*bintree{
+			"staticpod": {nil, map[string]*bintree{
+				"controller": {nil, map[string]*bintree{
+					"installer": {nil, map[string]*bintree{
+						"manifests": {nil, map[string]*bintree{
+							"installer-pod.yaml": {pkgOperatorStaticpodControllerInstallerManifestsInstallerPodYaml, map[string]*bintree{}},
+						}},
+					}},
+				}},
+			}},
+		}},
+	}},
+}}
+
+// RestoreAsset restores an asset under the given directory
+func RestoreAsset(dir, name string) error {
+	data, err := Asset(name)
+	if err != nil {
+		return err
+	}
+	info, err := AssetInfo(name)
+	if err != nil {
+		return err
+	}
+	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
+	if err != nil {
+		return err
+	}
+	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
+	if err != nil {
+		return err
+	}
+	err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RestoreAssets restores an asset under the given directory recursively
+func RestoreAssets(dir, name string) error {
+	children, err := AssetDir(name)
+	// File
+	if err != nil {
+		return RestoreAsset(dir, name)
+	}
+	// Dir
+	for _, child := range children {
+		err = RestoreAssets(dir, filepath.Join(name, child))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func _filePath(dir, name string) string {
+	cannonicalName := strings.Replace(name, "\\", "/", -1)
+	return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go
new file mode 100644
index 00000000000..51c455e8bc2
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go
@@ -0,0 +1,902 @@
+package installer
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/informers"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/loglevel"
+	"github.com/openshift/library-go/pkg/operator/management"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceapply"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
+	"github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/bindata"
+	"github.com/openshift/library-go/pkg/operator/staticpod/controller/revision"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+const (
+	installerControllerWorkQueueKey = "key"
+	manifestDir                     = "pkg/operator/staticpod/controller/installer"
+	manifestInstallerPodPath        = "manifests/installer-pod.yaml"
+
+	hostResourceDirDir = "/etc/kubernetes/static-pod-resources"
+	hostPodManifestDir = "/etc/kubernetes/manifests"
+
+	revisionLabel       = "revision"
+	statusConfigMapName = "revision-status"
+)
+
+// InstallerController is a controller that watches the currentRevision and targetRevision fields for each node and spawns
+// installer pods to update the static pods on the master nodes.
+type InstallerController struct {
+	targetNamespace, staticPodName string
+	// configMaps is the list of configmaps that are directly copied. A different actor/controller modifies these.
+	// the first element should be the configmap that contains the static pod manifest
+	configMaps []revision.RevisionResource
+	// secrets is a list of secrets that are directly copied for the current values. A different actor/controller modifies these.
+	secrets []revision.RevisionResource
+	// command is the string to use for the installer pod command
+	command []string
+
+	// these are copied separately at the beginning to a fixed location
+	certConfigMaps []revision.RevisionResource
+	certSecrets    []revision.RevisionResource
+	certDir        string
+
+	operatorClient v1helpers.StaticPodOperatorClient
+
+	configMapsGetter corev1client.ConfigMapsGetter
+	secretsGetter    corev1client.SecretsGetter
+	podsGetter       corev1client.PodsGetter
+
+	cachesToSync  []cache.InformerSynced
+	queue         workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+
+	// installerPodImageFn returns the image name for the installer pod
+	installerPodImageFn func() string
+	// ownerRefsFn sets the ownerrefs on the pruner pod
+	ownerRefsFn func(revision int32) ([]metav1.OwnerReference, error)
+
+	installerPodMutationFns []InstallerPodMutationFunc
+}
+
+// InstallerPodMutationFunc is a function that has a chance at changing the installer pod before it is created
+type InstallerPodMutationFunc func(pod *corev1.Pod, nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error
+
+func (c *InstallerController) WithInstallerPodMutationFn(installerPodMutationFn InstallerPodMutationFunc) *InstallerController {
+	c.installerPodMutationFns = append(c.installerPodMutationFns, installerPodMutationFn)
+	return c
+}
+
+func (c *InstallerController) WithCerts(certDir string, certConfigMaps, certSecrets []revision.RevisionResource) *InstallerController {
+	c.certDir = certDir
+	c.certConfigMaps = certConfigMaps
+	c.certSecrets = certSecrets
+	return c
+}
+
+// staticPodState is the status of a static pod that has been installed to a node.
+type staticPodState int
+
+const (
+	// staticPodStatePending means that the installed static pod is not up yet.
+	staticPodStatePending = staticPodState(iota)
+	// staticPodStateReady means that the installed static pod is ready.
+	staticPodStateReady
+	// staticPodStateFailed means that the static pod installation of a node has failed.
+	staticPodStateFailed
+)
+
+// NewInstallerController creates a new installer controller.
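For orientation, this is roughly how a static-pod operator would wire up the constructor that follows. Every argument value here is an assumption for illustration; the clients, informer factory, and recorder are presumed to have been built by the operator's starter code:

// a minimal sketch, assuming imports matching installer_controller.go
func startInstallerController(
	ctx context.Context,
	kubeClient kubernetes.Interface,
	operatorClient v1helpers.StaticPodOperatorClient,
	kubeInformersForTargetNamespace informers.SharedInformerFactory,
	eventRecorder events.Recorder,
) {
	installer := NewInstallerController(
		"openshift-kube-apiserver", // targetNamespace (assumed)
		"kube-apiserver-pod",       // staticPodName (assumed)
		[]revision.RevisionResource{{Name: "kube-apiserver-pod"}}, // first entry must be the static pod manifest configmap
		[]revision.RevisionResource{{Name: "serving-cert", Optional: true}},
		[]string{"cluster-kube-apiserver-operator", "installer"}, // installer pod command (assumed)
		kubeInformersForTargetNamespace,
		operatorClient,
		kubeClient.CoreV1(), // ConfigMapsGetter
		kubeClient.CoreV1(), // SecretsGetter
		kubeClient.CoreV1(), // PodsGetter
		eventRecorder,
	)
	go installer.Run(ctx, 1)
}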
+func NewInstallerController(
+	targetNamespace, staticPodName string,
+	configMaps []revision.RevisionResource,
+	secrets []revision.RevisionResource,
+	command []string,
+	kubeInformersForTargetNamespace informers.SharedInformerFactory,
+	operatorClient v1helpers.StaticPodOperatorClient,
+	configMapsGetter corev1client.ConfigMapsGetter,
+	secretsGetter corev1client.SecretsGetter,
+	podsGetter corev1client.PodsGetter,
+	eventRecorder events.Recorder,
+) *InstallerController {
+	c := &InstallerController{
+		targetNamespace: targetNamespace,
+		staticPodName:   staticPodName,
+		configMaps:      configMaps,
+		secrets:         secrets,
+		command:         command,
+
+		operatorClient:   operatorClient,
+		configMapsGetter: configMapsGetter,
+		secretsGetter:    secretsGetter,
+		podsGetter:       podsGetter,
+		eventRecorder:    eventRecorder.WithComponentSuffix("installer-controller"),
+
+		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerController"),
+
+		installerPodImageFn: getInstallerPodImageFromEnv,
+	}
+
+	c.ownerRefsFn = c.setOwnerRefs
+
+	operatorClient.Informer().AddEventHandler(c.eventHandler())
+	kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler())
+
+	c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced)
+	c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced)
+
+	return c
+}
+
+func (c *InstallerController) getStaticPodState(nodeName string) (state staticPodState, revision, reason string, errors []string, err error) {
+	pod, err := c.podsGetter.Pods(c.targetNamespace).Get(mirrorPodNameForNode(c.staticPodName, nodeName), metav1.GetOptions{})
+	if err != nil {
+		return staticPodStatePending, "", "", nil, err
+	}
+	switch pod.Status.Phase {
+	case corev1.PodRunning, corev1.PodSucceeded:
+		for _, c := range pod.Status.Conditions {
+			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+				return staticPodStateReady, pod.Labels[revisionLabel], "static pod is ready", nil, nil
+			}
+		}
+		return staticPodStatePending, pod.Labels[revisionLabel], "static pod is not ready", nil, nil
+	case corev1.PodFailed:
+		return staticPodStateFailed, pod.Labels[revisionLabel], "static pod has failed", []string{pod.Status.Message}, nil
+	}
+
+	return staticPodStatePending, pod.Labels[revisionLabel], fmt.Sprintf("static pod has unknown phase: %v", pod.Status.Phase), nil, nil
+}
+
+// nodeToStartRevisionWith returns a node index i and guarantees for every node < i that it is
+// - not updating
+// - ready
+// - at the revision claimed in CurrentRevision.
+func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state staticPodState, revision, reason string, errors []string, err error), nodes []operatorv1.NodeStatus) (int, string, error) {
+	if len(nodes) == 0 {
+		return 0, "", fmt.Errorf("nodes array cannot be empty")
+	}
+
+	// find upgrading node as this will be the first to start new revision (to minimize number of down nodes)
+	for i := range nodes {
+		if nodes[i].TargetRevision != 0 {
+			reason := fmt.Sprintf("node %s is progressing towards %d", nodes[i].NodeName, nodes[i].TargetRevision)
+			return i, reason, nil
+		}
+	}
+
+	// otherwise try to find a node that is not ready. Take the oldest one.
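The first rule above is absolute: an in-transition node is picked before pod state is even consulted. A godoc-style example of that behavior, using the package-local function with fake pod state (a sketch, not part of this patch):

func ExampleNodeToStartRevisionWith() {
	nodes := []operatorv1.NodeStatus{
		{NodeName: "master-0", CurrentRevision: 2},
		{NodeName: "master-1", CurrentRevision: 2, TargetRevision: 3}, // already in transition
		{NodeName: "master-2", CurrentRevision: 1},
	}
	// rule one never calls this: the in-transition node wins outright,
	// even though master-2 is further behind
	fakeState := func(string) (staticPodState, string, string, []string, error) {
		return staticPodStateReady, "2", "static pod is ready", nil, nil
	}
	i, reason, _ := nodeToStartRevisionWith(fakeState, nodes)
	fmt.Println(i, reason)
	// Output: 1 node master-1 is progressing towards 3
}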
+	oldestNotReadyRevisionNode := -1
+	oldestNotReadyRevision := math.MaxInt32
+	for i := range nodes {
+		currNodeState := &nodes[i]
+		state, runningRevision, _, _, err := getStaticPodState(currNodeState.NodeName)
+		if err != nil && apierrors.IsNotFound(err) {
+			return i, fmt.Sprintf("node %s static pod not found", currNodeState.NodeName), nil
+		}
+		if err != nil {
+			return 0, "", err
+		}
+		revisionNum, err := strconv.Atoi(runningRevision)
+		if err != nil {
+			reason := fmt.Sprintf("node %s has an invalid current revision %q", currNodeState.NodeName, runningRevision)
+			return i, reason, nil
+		}
+		if state != staticPodStateReady && revisionNum < oldestNotReadyRevision {
+			oldestNotReadyRevisionNode = i
+			oldestNotReadyRevision = revisionNum
+		}
+	}
+	if oldestNotReadyRevisionNode >= 0 {
+		reason := fmt.Sprintf("node %s with revision %d is the oldest not ready", nodes[oldestNotReadyRevisionNode].NodeName, oldestNotReadyRevision)
+		return oldestNotReadyRevisionNode, reason, nil
+	}
+
+	// find a node that has the wrong revision. Take the oldest one.
+	oldestPodRevisionNode := -1
+	oldestPodRevision := math.MaxInt32
+	for i := range nodes {
+		currNodeState := &nodes[i]
+		_, runningRevision, _, _, err := getStaticPodState(currNodeState.NodeName)
+		if err != nil && apierrors.IsNotFound(err) {
+			return i, fmt.Sprintf("node %s static pod not found", currNodeState.NodeName), nil
+		}
+		if err != nil {
+			return 0, "", err
+		}
+		revisionNum, err := strconv.Atoi(runningRevision)
+		if err != nil {
+			reason := fmt.Sprintf("node %s has an invalid current revision %q", currNodeState.NodeName, runningRevision)
+			return i, reason, nil
+		}
+		if revisionNum != int(currNodeState.CurrentRevision) && revisionNum < oldestPodRevision {
+			oldestPodRevisionNode = i
+			oldestPodRevision = revisionNum
+		}
+	}
+	if oldestPodRevisionNode >= 0 {
+		reason := fmt.Sprintf("node %s with revision %d is the oldest not matching its expected revision %d", nodes[oldestPodRevisionNode].NodeName, oldestPodRevision, nodes[oldestPodRevisionNode].CurrentRevision)
+		return oldestPodRevisionNode, reason, nil
+	}
+
+	// last but not least, choose the one with the oldest current revision. This will imply that failed installer pods will be retried.
+	oldestCurrentRevisionNode := -1
+	oldestCurrentRevision := int32(math.MaxInt32)
+	for i := range nodes {
+		currNodeState := &nodes[i]
+		if currNodeState.CurrentRevision < oldestCurrentRevision {
+			oldestCurrentRevisionNode = i
+			oldestCurrentRevision = currNodeState.CurrentRevision
+		}
+	}
+	if oldestCurrentRevisionNode >= 0 {
+		reason := fmt.Sprintf("node %s with revision %d is the oldest", nodes[oldestCurrentRevisionNode].NodeName, oldestCurrentRevision)
+		return oldestCurrentRevisionNode, reason, nil
+	}
+
+	reason := fmt.Sprintf("node %s of revision %d is no worse than any other node, but comes first", nodes[0].NodeName, oldestCurrentRevision)
+	return 0, reason, nil
+}
+
+// manageInstallationPods takes care of creating content for the static pods to install.
+// It returns whether to requeue and whether an error happened when updating status. Normally it updates the status itself.
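manageInstallationPods below walks the node list as a ring starting at the node chosen above, so iteration wraps around the end of the slice. A two-line illustration of the index arithmetic (hypothetical node names):

package main

import "fmt"

func main() {
	nodes := []string{"master-0", "master-1", "master-2"}
	startNode := 2 // the unhealthiest node, as chosen by nodeToStartRevisionWith
	for l := 0; l < len(nodes); l++ {
		i := (startNode + l) % len(nodes)
		fmt.Println(nodes[i]) // master-2, master-0, master-1
	}
}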
+func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.StaticPodOperatorSpec, originalOperatorStatus *operatorv1.StaticPodOperatorStatus, resourceVersion string) (bool, error) { + operatorStatus := originalOperatorStatus.DeepCopy() + + if len(operatorStatus.NodeStatuses) == 0 { + return false, nil + } + + // stop on first deployment failure of the latest revision (excluding OOM, that never sets LatestAvailableRevision). + for _, s := range operatorStatus.NodeStatuses { + if s.LastFailedRevision == operatorStatus.LatestAvailableRevision { + return false, nil + } + } + + // start with node which is in worst state (instead of terminating healthy pods first) + startNode, nodeChoiceReason, err := nodeToStartRevisionWith(c.getStaticPodState, operatorStatus.NodeStatuses) + if err != nil { + return true, err + } + + for l := 0; l < len(operatorStatus.NodeStatuses); l++ { + i := (startNode + l) % len(operatorStatus.NodeStatuses) + + var currNodeState *operatorv1.NodeStatus + var prevNodeState *operatorv1.NodeStatus + currNodeState = &operatorStatus.NodeStatuses[i] + if l > 0 { + prev := (startNode + l - 1) % len(operatorStatus.NodeStatuses) + prevNodeState = &operatorStatus.NodeStatuses[prev] + nodeChoiceReason = fmt.Sprintf("node %s is the next node in the line", currNodeState.NodeName) + } + + // if we are in a transition, check to see whether our installer pod completed + if currNodeState.TargetRevision > currNodeState.CurrentRevision { + if err := c.ensureInstallerPod(currNodeState.NodeName, operatorSpec, currNodeState.TargetRevision); err != nil { + c.eventRecorder.Warningf("InstallerPodFailed", "Failed to create installer pod for revision %d on node %q: %v", + currNodeState.TargetRevision, currNodeState.NodeName, err) + return true, err + } + + pendingNewRevision := operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision + newCurrNodeState, installerPodFailed, reason, err := c.newNodeStateForInstallInProgress(currNodeState, pendingNewRevision) + if err != nil { + return true, err + } + + // if we make a change to this status, we want to write it out to the API before we commence work on the next node. + // it's an extra write/read, but it makes the state debuggable from outside this process + if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { + klog.Infof("%q moving to %v because %s", currNodeState.NodeName, spew.Sdump(*newCurrNodeState), reason) + newOperatorStatus, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions) + if updateError != nil { + return false, updateError + } else if updated && currNodeState.CurrentRevision != newCurrNodeState.CurrentRevision { + c.eventRecorder.Eventf("NodeCurrentRevisionChanged", "Updated node %q from revision %d to %d because %s", currNodeState.NodeName, + currNodeState.CurrentRevision, newCurrNodeState.CurrentRevision, reason) + } + if err := c.updateRevisionStatus(newOperatorStatus); err != nil { + klog.Errorf("error updating revision status configmap: %v", err) + } + return false, nil + } else { + klog.V(2).Infof("%q is in transition to %d, but has not made progress because %s", currNodeState.NodeName, currNodeState.TargetRevision, reason) + } + + // We want to retry the installer pod by deleting and then rekicking. Also we don't set LastFailedRevision. 
+ if !installerPodFailed { + break + } + klog.Infof("Retrying %q for revision %d because %s", currNodeState.NodeName, currNodeState.TargetRevision, reason) + installerPodName := getInstallerPodName(currNodeState.TargetRevision, currNodeState.NodeName) + if err := c.podsGetter.Pods(c.targetNamespace).Delete(installerPodName, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + return true, err + } + } + + revisionToStart := c.getRevisionToStart(currNodeState, prevNodeState, operatorStatus) + if revisionToStart == 0 { + klog.V(4).Infof("%s, but node %s does not need update", nodeChoiceReason, currNodeState.NodeName) + continue + } + klog.Infof("%s and needs new revision %d", nodeChoiceReason, revisionToStart) + + newCurrNodeState := currNodeState.DeepCopy() + newCurrNodeState.TargetRevision = revisionToStart + newCurrNodeState.LastFailedRevisionErrors = nil + + // if we make a change to this status, we want to write it out to the API before we commence work on the next node. + // it's an extra write/read, but it makes the state debuggable from outside this process + if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { + klog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState)) + if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { + return false, updateError + } else if updated && currNodeState.TargetRevision != newCurrNodeState.TargetRevision && newCurrNodeState.TargetRevision != 0 { + c.eventRecorder.Eventf("NodeTargetRevisionChanged", "Updating node %q from revision %d to %d because %s", currNodeState.NodeName, + currNodeState.CurrentRevision, newCurrNodeState.TargetRevision, nodeChoiceReason) + } + + return false, nil + } + break + } + + return false, nil +} + +func (c *InstallerController) updateRevisionStatus(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + failedRevisions := make(map[int32]struct{}) + currentRevisions := make(map[int32]struct{}) + for _, nodeState := range operatorStatus.NodeStatuses { + failedRevisions[nodeState.LastFailedRevision] = struct{}{} + currentRevisions[nodeState.CurrentRevision] = struct{}{} + } + delete(failedRevisions, 0) + + // If all current revisions point to the same revision, then mark it successful + if len(currentRevisions) == 1 { + err := c.updateConfigMapForRevision(currentRevisions, string(corev1.PodSucceeded)) + if err != nil { + return err + } + } + return c.updateConfigMapForRevision(failedRevisions, string(corev1.PodFailed)) +} + +func (c *InstallerController) updateConfigMapForRevision(currentRevisions map[int32]struct{}, status string) error { + for currentRevision := range currentRevisions { + statusConfigMap, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(statusConfigMapNameForRevision(currentRevision), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.Infof("%s configmap not found, skipping update revision status", statusConfigMapNameForRevision(currentRevision)) + continue + } + if err != nil { + return err + } + statusConfigMap.Data["status"] = status + _, _, err = resourceapply.ApplyConfigMap(c.configMapsGetter, c.eventRecorder, statusConfigMap) + if err != nil { + return err + } + } + return nil +} + +func setNodeStatusFn(status *operatorv1.NodeStatus) v1helpers.UpdateStaticPodStatusFunc { + return func(operatorStatus *operatorv1.StaticPodOperatorStatus) error { + for i := range 
operatorStatus.NodeStatuses { + if operatorStatus.NodeStatuses[i].NodeName == status.NodeName { + operatorStatus.NodeStatuses[i] = *status + break + } + } + return nil + } +} + +// setAvailableProgressingConditions sets the Available and Progressing conditions +func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1.StaticPodOperatorStatus) error { + // Available means that we have at least one pod at the latest level + numAvailable := 0 + numAtLatestRevision := 0 + numProgressing := 0 + counts := map[int32]int{} + failingCount := map[int32]int{} + failing := map[int32][]string{} + for _, currNodeStatus := range newStatus.NodeStatuses { + counts[currNodeStatus.CurrentRevision] = counts[currNodeStatus.CurrentRevision] + 1 + if currNodeStatus.CurrentRevision != 0 { + numAvailable++ + } + + // keep track of failures so that we can report failing status + if currNodeStatus.LastFailedRevision != 0 { + failingCount[currNodeStatus.LastFailedRevision] = failingCount[currNodeStatus.LastFailedRevision] + 1 + failing[currNodeStatus.LastFailedRevision] = append(failing[currNodeStatus.LastFailedRevision], currNodeStatus.LastFailedRevisionErrors...) + } + + if newStatus.LatestAvailableRevision == currNodeStatus.CurrentRevision { + numAtLatestRevision += 1 + } else { + numProgressing += 1 + } + } + + revisionStrings := []string{} + for _, currentRevision := range Int32KeySet(counts).List() { + count := counts[currentRevision] + revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes are at revision %d", count, currentRevision)) + } + // if we are progressing and no nodes have achieved that level, we should indicate + if numProgressing > 0 && counts[newStatus.LatestAvailableRevision] == 0 { + revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes have achieved new revision %d", 0, newStatus.LatestAvailableRevision)) + } + revisionDescription := strings.Join(revisionStrings, "; ") + + if numAvailable > 0 { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeAvailable, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("%d nodes are active; %s", numAvailable, revisionDescription), + }) + } else { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeAvailable, + Status: operatorv1.ConditionFalse, + Reason: "ZeroNodesActive", + Message: fmt.Sprintf("%d nodes are active; %s", numAvailable, revisionDescription), + }) + } + + // Progressing means that the any node is not at the latest available revision + if numProgressing > 0 { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeProgressing, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("%s", revisionDescription), + }) + } else { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: operatorv1.OperatorStatusTypeProgressing, + Status: operatorv1.ConditionFalse, + Reason: "AllNodesAtLatestRevision", + Message: fmt.Sprintf("%s", revisionDescription), + }) + } + + if len(failing) > 0 { + failingStrings := []string{} + for _, failingRevision := range Int32KeySet(failing).List() { + errorStrings := failing[failingRevision] + failingStrings = append(failingStrings, fmt.Sprintf("%d nodes are failing on revision %d:\n%v", failingCount[failingRevision], failingRevision, strings.Join(errorStrings, "\n"))) + } + failingDescription := strings.Join(failingStrings, 
"; ") + + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: condition.NodeInstallerDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "InstallerPodFailed", + Message: failingDescription, + }) + } else { + v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ + Type: condition.NodeInstallerDegradedConditionType, + Status: operatorv1.ConditionFalse, + }) + } + + return nil +} + +// newNodeStateForInstallInProgress returns the new NodeState, whether it was killed by OOM or an error +func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *operatorv1.NodeStatus, newRevisionPending bool) (status *operatorv1.NodeStatus, installerPodFailed bool, reason string, err error) { + ret := currNodeState.DeepCopy() + installerPod, err := c.podsGetter.Pods(c.targetNamespace).Get(getInstallerPodName(currNodeState.TargetRevision, currNodeState.NodeName), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + ret.LastFailedRevision = currNodeState.TargetRevision + ret.TargetRevision = currNodeState.CurrentRevision + ret.LastFailedRevisionErrors = []string{err.Error()} + return ret, false, "installer pod was not found", nil + } + if err != nil { + return nil, false, "", err + } + + failed := false + errors := []string{} + reason = "" + + switch installerPod.Status.Phase { + case corev1.PodSucceeded: + if newRevisionPending { + // stop early, don't wait for ready static pod because a new revision is waiting + ret.LastFailedRevision = currNodeState.TargetRevision + ret.TargetRevision = 0 + ret.LastFailedRevisionErrors = []string{fmt.Sprintf("static pod of revision has been installed, but is not ready while new revision %d is pending", currNodeState.TargetRevision)} + return ret, false, "new revision pending", nil + } + + state, currentRevision, staticPodReason, failedErrors, err := c.getStaticPodState(currNodeState.NodeName) + if err != nil && apierrors.IsNotFound(err) { + // pod not launched yet + // TODO: have a timeout here and retry the installer + reason = "static pod is pending" + break + } + if err != nil { + return nil, false, "", err + } + + if currentRevision != strconv.Itoa(int(currNodeState.TargetRevision)) { + // new updated pod to be launched + if len(currentRevision) == 0 { + reason = fmt.Sprintf("waiting for static pod of revision %d", currNodeState.TargetRevision) + } else { + reason = fmt.Sprintf("waiting for static pod of revision %d, found %s", currNodeState.TargetRevision, currentRevision) + } + break + } + + switch state { + case staticPodStateFailed: + failed = true + reason = staticPodReason + errors = failedErrors + + case staticPodStateReady: + if currNodeState.TargetRevision > ret.CurrentRevision { + ret.CurrentRevision = currNodeState.TargetRevision + } + ret.TargetRevision = 0 + ret.LastFailedRevision = 0 + ret.LastFailedRevisionErrors = nil + return ret, false, staticPodReason, nil + default: + reason = "static pod is pending" + } + + case corev1.PodFailed: + failed = true + reason = "installer pod failed" + for _, containerStatus := range installerPod.Status.ContainerStatuses { + if containerStatus.State.Terminated != nil && len(containerStatus.State.Terminated.Message) > 0 { + errors = append(errors, fmt.Sprintf("%s: %s", containerStatus.Name, containerStatus.State.Terminated.Message)) + c.eventRecorder.Warningf("InstallerPodFailed", "installer errors: %v", strings.Join(errors, "\n")) + // do not set LastFailedRevision + return currNodeState, true, fmt.Sprintf("installer 
pod failed: %v", strings.Join(errors, "\n")), nil + } + } + } + + if failed { + ret.LastFailedRevision = currNodeState.TargetRevision + ret.TargetRevision = 0 + if len(errors) == 0 { + errors = append(errors, fmt.Sprintf("no detailed termination message, see `oc get -n %q pods/%q -oyaml`", installerPod.Namespace, installerPod.Name)) + } + ret.LastFailedRevisionErrors = errors + return ret, false, "installer pod failed", nil + } + + return ret, false, reason, nil +} + +// getRevisionToStart returns the revision we need to start or zero if none +func (c *InstallerController) getRevisionToStart(currNodeState, prevNodeState *operatorv1.NodeStatus, operatorStatus *operatorv1.StaticPodOperatorStatus) int32 { + if prevNodeState == nil { + currentAtLatest := currNodeState.CurrentRevision == operatorStatus.LatestAvailableRevision + failedAtLatest := currNodeState.LastFailedRevision == operatorStatus.LatestAvailableRevision + if !currentAtLatest && !failedAtLatest { + return operatorStatus.LatestAvailableRevision + } + return 0 + } + + prevFinished := prevNodeState.TargetRevision == 0 + prevInTransition := prevNodeState.CurrentRevision != prevNodeState.TargetRevision + if prevInTransition && !prevFinished { + return 0 + } + + prevAhead := prevNodeState.CurrentRevision > currNodeState.CurrentRevision + failedAtPrev := currNodeState.LastFailedRevision == prevNodeState.CurrentRevision + if prevAhead && !failedAtPrev { + return prevNodeState.CurrentRevision + } + + return 0 +} + +func getInstallerPodName(revision int32, nodeName string) string { + return fmt.Sprintf("installer-%d-%s", revision, nodeName) +} + +// ensureInstallerPod creates the installer pod with the secrets required to if it does not exist already +func (c *InstallerController) ensureInstallerPod(nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error { + pod := resourceread.ReadPodV1OrDie(bindata.MustAsset(filepath.Join(manifestDir, manifestInstallerPodPath))) + + pod.Namespace = c.targetNamespace + pod.Name = getInstallerPodName(revision, nodeName) + pod.Spec.NodeName = nodeName + pod.Spec.Containers[0].Image = c.installerPodImageFn() + pod.Spec.Containers[0].Command = c.command + + ownerRefs, err := c.ownerRefsFn(revision) + if err != nil { + return fmt.Errorf("unable to set installer pod ownerrefs: %+v", err) + } + pod.OwnerReferences = ownerRefs + + if c.configMaps[0].Optional { + return fmt.Errorf("pod configmap %s is required, cannot be optional", c.configMaps[0].Name) + } + + args := []string{ + fmt.Sprintf("-v=%d", loglevel.LogLevelToKlog(operatorSpec.LogLevel)), + fmt.Sprintf("--revision=%d", revision), + fmt.Sprintf("--namespace=%s", pod.Namespace), + fmt.Sprintf("--pod=%s", c.configMaps[0].Name), + fmt.Sprintf("--resource-dir=%s", hostResourceDirDir), + fmt.Sprintf("--pod-manifest-dir=%s", hostPodManifestDir), + } + for _, cm := range c.configMaps { + if cm.Optional { + args = append(args, fmt.Sprintf("--optional-configmaps=%s", cm.Name)) + } else { + args = append(args, fmt.Sprintf("--configmaps=%s", cm.Name)) + } + } + for _, s := range c.secrets { + if s.Optional { + args = append(args, fmt.Sprintf("--optional-secrets=%s", s.Name)) + } else { + args = append(args, fmt.Sprintf("--secrets=%s", s.Name)) + } + } + if len(c.certDir) > 0 { + args = append(args, fmt.Sprintf("--cert-dir=%s", filepath.Join(hostResourceDirDir, c.certDir))) + for _, cm := range c.certConfigMaps { + if cm.Optional { + args = append(args, fmt.Sprintf("--optional-cert-configmaps=%s", cm.Name)) + } else { + args = 
append(args, fmt.Sprintf("--cert-configmaps=%s", cm.Name)) + } + } + for _, s := range c.certSecrets { + if s.Optional { + args = append(args, fmt.Sprintf("--optional-cert-secrets=%s", s.Name)) + } else { + args = append(args, fmt.Sprintf("--cert-secrets=%s", s.Name)) + } + } + } + + pod.Spec.Containers[0].Args = args + + // Some owners need to change aspects of the pod, such as its arguments. + for _, fn := range c.installerPodMutationFns { + if err := fn(pod, nodeName, operatorSpec, revision); err != nil { + return err + } + } + + _, _, err = resourceapply.ApplyPod(c.podsGetter, c.eventRecorder, pod) + return err +} + +func (c *InstallerController) setOwnerRefs(revision int32) ([]metav1.OwnerReference, error) { + ownerReferences := []metav1.OwnerReference{} + statusConfigMap, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(fmt.Sprintf("revision-status-%d", revision), metav1.GetOptions{}) + if err == nil { + ownerReferences = append(ownerReferences, metav1.OwnerReference{ + APIVersion: "v1", + Kind: "ConfigMap", + Name: statusConfigMap.Name, + UID: statusConfigMap.UID, + }) + } + return ownerReferences, err +} + +func getInstallerPodImageFromEnv() string { + return os.Getenv("OPERATOR_IMAGE") +} + +func (c InstallerController) ensureSecretRevisionResourcesExists(secrets []revision.RevisionResource, hasRevisionSuffix bool, latestRevisionNumber int32) error { + missing := sets.NewString() + for _, secret := range secrets { + if secret.Optional { + continue + } + name := secret.Name + if !hasRevisionSuffix { + name = fmt.Sprintf("%s-%d", name, latestRevisionNumber) + } + _, err := c.secretsGetter.Secrets(c.targetNamespace).Get(name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing.Insert(name) + } + } + if missing.Len() == 0 { + return nil + } + return fmt.Errorf("secrets: %s", strings.Join(missing.List(), ",")) +} + +func (c InstallerController) ensureConfigMapRevisionResourcesExists(configs []revision.RevisionResource, hasRevisionSuffix bool, latestRevisionNumber int32) error { + missing := sets.NewString() + for _, config := range configs { + if config.Optional { + continue + } + name := config.Name + if !hasRevisionSuffix { + name = fmt.Sprintf("%s-%d", name, latestRevisionNumber) + } + _, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing.Insert(name) + } + } + if missing.Len() == 0 { + return nil + } + return fmt.Errorf("configmaps: %s", strings.Join(missing.List(), ",")) +} + +// ensureRequiredResourcesExist makes sure that all non-optional resources exist; otherwise it returns an error to trigger a requeue so that we try again. 
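+// For example, with a required "test-config" configmap, a required "test-secret" secret, and revisionNumber=3, missing copies surface as (compare TestEnsureRequiredResources below): +//   missing required resources: [configmaps: test-config-3, secrets: test-secret-3]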
+func (c InstallerController) ensureRequiredResourcesExist(revisionNumber int32) error { + errs := []error{} + + errs = append(errs, c.ensureConfigMapRevisionResourcesExists(c.certConfigMaps, true, revisionNumber)) + errs = append(errs, c.ensureConfigMapRevisionResourcesExists(c.configMaps, false, revisionNumber)) + errs = append(errs, c.ensureSecretRevisionResourcesExists(c.certSecrets, true, revisionNumber)) + errs = append(errs, c.ensureSecretRevisionResourcesExists(c.secrets, false, revisionNumber)) + + aggregatedErr := utilerrors.NewAggregate(errs) + if aggregatedErr == nil { + return nil + } + + eventMessages := []string{} + for _, err := range aggregatedErr.Errors() { + eventMessages = append(eventMessages, err.Error()) + } + c.eventRecorder.Warningf("RequiredInstallerResourcesMissing", strings.Join(eventMessages, ", ")) + return fmt.Errorf("missing required resources: %v", aggregatedErr) +} + +func (c InstallerController) sync() error { + operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + operatorStatus := originalOperatorStatus.DeepCopy() + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + err = c.ensureRequiredResourcesExist(originalOperatorStatus.LatestAvailableRevision) + + // Only manage installation pods when all required resources are present. + if err == nil { + requeue, syncErr := c.manageInstallationPods(operatorSpec, operatorStatus, resourceVersion) + if requeue && syncErr == nil { + return fmt.Errorf("synthetic requeue request") + } + err = syncErr + } + + // Update the failing condition. + // If required resources are missing, this reports Degraded, since that pre-condition prevents us from creating installer pods. + cond := operatorv1.OperatorCondition{ + Type: condition.InstallerControllerDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if err != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = err.Error() + } + if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { + if err == nil { + return updateError + } + } + + return err +} + +// Run starts the InstallerController and blocks until the given context is cancelled. +func (c *InstallerController) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting InstallerController") + defer klog.Infof("Shutting down InstallerController") + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
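+ // A single worker serializes all calls to sync(), so the node-by-node rollout logic never races with itself; the workers argument is presumably accepted only to match the usual controller Run signature.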
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *InstallerController) runWorker(ctx context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *InstallerController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *InstallerController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(installerControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(installerControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(installerControllerWorkQueueKey) }, + } +} + +func mirrorPodNameForNode(staticPodName, nodeName string) string { + return staticPodName + "-" + nodeName +} + +func statusConfigMapNameForRevision(revision int32) string { + return fmt.Sprintf("%s-%d", statusConfigMapName, revision) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go new file mode 100644 index 00000000000..3e2cebb3963 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go @@ -0,0 +1,1448 @@ +package installer + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/events/eventstesting" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func TestNewNodeStateForInstallInProgress(t *testing.T) { + kubeClient := fake.NewSimpleClientset( + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-config"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-secret"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-secret", 1)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-config", 1)}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-secret", 2)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-config", 2)}}, + ) + + var installerPod *corev1.Pod + + kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + if installerPod != nil { + return true, nil, errors.NewAlreadyExists(schema.GroupResource{Resource: "pods"}, 
installerPod.Name) + } + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) + kubeClient.PrependReactor("get", "pods", getPodsReactor(installerPod)) + return true, installerPod, nil + }) + + kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ) + + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + podCommand := []string{"/bin/true", "--foo=test", "--bar"} + c := NewInstallerController( + "test", "test-pod", + []revision.RevisionResource{{Name: "test-config"}}, + []revision.RevisionResource{{Name: "test-secret"}}, + podCommand, + kubeInformers, + fakeStaticPodOperatorClient, + kubeClient.CoreV1(), + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { + return []metav1.OwnerReference{}, nil + } + c.installerPodImageFn = func() string { return "docker.io/foo/bar" } + + t.Log("setting target revision") + if err := c.sync(); err != nil { + t.Fatal(err) + } + + if installerPod != nil { + t.Fatalf("not expected to create installer pod yet") + } + + _, currStatus, _, _ := fakeStaticPodOperatorClient.GetStaticPodOperatorState() + if currStatus.NodeStatuses[0].TargetRevision != 1 { + t.Fatalf("expected target revision generation 1, got: %d", currStatus.NodeStatuses[0].TargetRevision) + } + + t.Log("starting installer pod") + + if err := c.sync(); err != nil { + t.Fatal(err) + } + if installerPod == nil { + t.Fatalf("expected to create installer pod") + } + + t.Run("VerifyPodCommand", func(t *testing.T) { + cmd := installerPod.Spec.Containers[0].Command + if !reflect.DeepEqual(podCommand, cmd) { + t.Errorf("expected pod command %#v to match resulting installer pod command: %#v", podCommand, cmd) + } + }) + + t.Run("VerifyPodArguments", func(t *testing.T) { + args := installerPod.Spec.Containers[0].Args + if len(args) == 0 { + t.Errorf("pod args should not be empty") + } + foundRevision := false + for _, arg := range args { + if arg == "--revision=1" { + foundRevision = true + } + } + if !foundRevision { + t.Errorf("revision installer argument not found") + } + }) + + t.Log("synching again, nothing happens") + if err := c.sync(); err != nil { + t.Fatal(err) + } + + if currStatus.NodeStatuses[0].TargetRevision != 1 { + t.Fatalf("expected target revision generation 1, got: %d", currStatus.NodeStatuses[0].TargetRevision) + } + if currStatus.NodeStatuses[0].CurrentRevision != 0 { + t.Fatalf("expected current revision generation 0, got: %d", currStatus.NodeStatuses[0].CurrentRevision) + } + + t.Log("installer succeeded") + installerPod.Status.Phase = corev1.PodSucceeded + + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, currStatus, _, _ = fakeStaticPodOperatorClient.GetStaticPodOperatorState() + if generation := currStatus.NodeStatuses[0].CurrentRevision; generation != 0 { + t.Errorf("expected current revision generation for node to be 0, got %d", generation) + } + + t.Log("static pod launched, but is not ready") + staticPod := &corev1.Pod{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod-test-node-1", + Namespace: "test", + Labels: map[string]string{"revision": "1"}, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Status: corev1.ConditionFalse, + Type: corev1.PodReady, + }, + }, + Phase: corev1.PodRunning, + }, + } + kubeClient.PrependReactor("get", "pods", getPodsReactor(staticPod)) + + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, currStatus, _, _ = fakeStaticPodOperatorClient.GetStaticPodOperatorState() + if generation := currStatus.NodeStatuses[0].CurrentRevision; generation != 0 { + t.Errorf("expected current revision generation for node to be 0, got %d", generation) + } + + t.Log("static pod is ready") + staticPod.Status.Conditions[0].Status = corev1.ConditionTrue + + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, currStatus, _, _ = fakeStaticPodOperatorClient.GetStaticPodOperatorState() + if generation := currStatus.NodeStatuses[0].CurrentRevision; generation != 1 { + t.Errorf("expected current revision generation for node to be 1, got %d", generation) + } + + _, currStatus, _, _ = fakeStaticPodOperatorClient.GetStaticPodOperatorState() + currStatus.LatestAvailableRevision = 2 + currStatus.NodeStatuses[0].TargetRevision = 2 + currStatus.NodeStatuses[0].CurrentRevision = 1 + fakeStaticPodOperatorClient.UpdateStaticPodOperatorStatus("1", currStatus) + + installerPod.Name = "installer-2-test-node-1" + installerPod.Status.Phase = corev1.PodFailed + installerPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Name: "installer", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{Message: "fake death"}, + }, + }, + } + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, currStatus, _, _ = fakeStaticPodOperatorClient.GetStaticPodOperatorState() + if generation := currStatus.NodeStatuses[0].LastFailedRevision; generation != 0 { + t.Errorf("expected last failed revision generation for node to be 0, got %d", generation) + } + + // installer pod failures are suppressed + if errors := currStatus.NodeStatuses[0].LastFailedRevisionErrors; len(errors) != 0 { + t.Error(errors) + } + + if v1helpers.FindOperatorCondition(currStatus.Conditions, operatorv1.OperatorStatusTypeProgressing) == nil { + t.Error("missing Progressing") + } + if v1helpers.FindOperatorCondition(currStatus.Conditions, operatorv1.OperatorStatusTypeAvailable) == nil { + t.Error("missing Available") + } +} + +func getPodsReactor(pods ...*corev1.Pod) ktesting.ReactionFunc { + return func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + podName := action.(ktesting.GetAction).GetName() + for _, p := range pods { + if p.Namespace == action.GetNamespace() && p.Name == podName { + return true, p, nil + } + } + return false, nil, nil + } +} + +func TestCreateInstallerPod(t *testing.T) { + kubeClient := fake.NewSimpleClientset( + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-config"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-secret"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-secret", 1)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-config", 1)}}, + ) + + var installerPod *corev1.Pod + kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + installerPod = 
action.(ktesting.CreateAction).GetObject().(*corev1.Pod) + return false, nil, nil + }) + kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) + + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + c := NewInstallerController( + "test", "test-pod", + []revision.RevisionResource{{Name: "test-config"}}, + []revision.RevisionResource{{Name: "test-secret"}}, + []string{"/bin/true"}, + kubeInformers, + fakeStaticPodOperatorClient, + kubeClient.CoreV1(), + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { + return []metav1.OwnerReference{}, nil + } + c.installerPodImageFn = func() string { return "docker.io/foo/bar" } + if err := c.sync(); err != nil { + t.Fatal(err) + } + + if installerPod != nil { + t.Fatalf("expected first sync not to create installer pod") + } + + if err := c.sync(); err != nil { + t.Fatal(err) + } + + if installerPod == nil { + t.Fatalf("expected to create installer pod") + } + + if installerPod.Spec.Containers[0].Image != "docker.io/foo/bar" { + t.Fatalf("expected docker.io/foo/bar image, got %q", installerPod.Spec.Containers[0].Image) + } + + if installerPod.Spec.Containers[0].Command[0] != "/bin/true" { + t.Fatalf("expected /bin/true as a command, got %q", installerPod.Spec.Containers[0].Command[0]) + } + + if installerPod.Name != "installer-1-test-node-1" { + t.Fatalf("expected name installer-1-test-node-1, got %q", installerPod.Name) + } + + if installerPod.Namespace != "test" { + t.Fatalf("expected test namespace, got %q", installerPod.Namespace) + } + + expectedArgs := []string{ + "-v=2", + "--revision=1", + "--namespace=test", + "--pod=test-config", + "--resource-dir=/etc/kubernetes/static-pod-resources", + "--pod-manifest-dir=/etc/kubernetes/manifests", + "--configmaps=test-config", + "--secrets=test-secret", + } + + if len(expectedArgs) != len(installerPod.Spec.Containers[0].Args) { + t.Fatalf("expected arguments does not match container arguments: %#v != %#v", expectedArgs, installerPod.Spec.Containers[0].Args) + } + + for i, v := range installerPod.Spec.Containers[0].Args { + if expectedArgs[i] != v { + t.Errorf("arg[%d] expected %q, got %q", i, expectedArgs[i], v) + } + } +} + +func TestEnsureInstallerPod(t *testing.T) { + tests := []struct { + name string + expectedArgs []string + configs []revision.RevisionResource + secrets []revision.RevisionResource + expectedErr string + }{ + { + name: "normal", + expectedArgs: []string{ + "-v=2", + "--revision=1", + "--namespace=test", + "--pod=test-config", + "--resource-dir=/etc/kubernetes/static-pod-resources", + "--pod-manifest-dir=/etc/kubernetes/manifests", + "--configmaps=test-config", + "--secrets=test-secret", + }, + configs: []revision.RevisionResource{{Name: "test-config"}}, + secrets: []revision.RevisionResource{{Name: "test-secret"}}, + }, + { + name: "optional", + expectedArgs: []string{ + "-v=2", + "--revision=1", + "--namespace=test", + "--pod=test-config", + 
"--resource-dir=/etc/kubernetes/static-pod-resources", + "--pod-manifest-dir=/etc/kubernetes/manifests", + "--configmaps=test-config", + "--configmaps=test-config-2", + "--optional-configmaps=test-config-opt", + "--secrets=test-secret", + "--secrets=test-secret-2", + "--optional-secrets=test-secret-opt", + }, + configs: []revision.RevisionResource{ + {Name: "test-config"}, + {Name: "test-config-2"}, + {Name: "test-config-opt", Optional: true}}, + secrets: []revision.RevisionResource{ + {Name: "test-secret"}, + {Name: "test-secret-2"}, + {Name: "test-secret-opt", Optional: true}}, + }, + { + name: "first-cm-not-optional", + expectedArgs: []string{ + "-v=2", + "--revision=1", + "--namespace=test", + "--pod=test-config", + "--resource-dir=/etc/kubernetes/static-pod-resources", + "--pod-manifest-dir=/etc/kubernetes/manifests", + "--configmaps=test-config", + "--secrets=test-secret", + }, + configs: []revision.RevisionResource{{Name: "test-config", Optional: true}}, + secrets: []revision.RevisionResource{{Name: "test-secret"}}, + expectedErr: "pod configmap test-config is required, cannot be optional", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset() + + var installerPod *corev1.Pod + kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) + return false, nil, nil + }) + kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) + + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + c := NewInstallerController( + "test", "test-pod", + tt.configs, + tt.secrets, + []string{"/bin/true"}, + kubeInformers, + fakeStaticPodOperatorClient, + kubeClient.CoreV1(), + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { + return []metav1.OwnerReference{}, nil + } + err := c.ensureInstallerPod("test-node-1", &operatorv1.StaticPodOperatorSpec{}, 1) + if err != nil { + if tt.expectedErr == "" { + t.Errorf("InstallerController.ensureInstallerPod() expected no error, got = %v", err) + return + } + if tt.expectedErr != err.Error() { + t.Errorf("InstallerController.ensureInstallerPod() got error = %v, wanted %s", err, tt.expectedErr) + return + } + return + } + if tt.expectedErr != "" { + t.Errorf("InstallerController.ensureInstallerPod() passed but expected error %s", tt.expectedErr) + } + + if len(tt.expectedArgs) != len(installerPod.Spec.Containers[0].Args) { + t.Fatalf("expected arguments does not match container arguments: %#v != %#v", tt.expectedArgs, installerPod.Spec.Containers[0].Args) + } + + for i, v := range installerPod.Spec.Containers[0].Args { + if tt.expectedArgs[i] != v { + t.Errorf("arg[%d] expected %q, got %q", i, tt.expectedArgs[i], v) + } + } + }) + } +} + +func TestCreateInstallerPodMultiNode(t *testing.T) { + newStaticPod := func(name string, revision int, 
phase corev1.PodPhase, ready bool) *corev1.Pod { + condStatus := corev1.ConditionTrue + if !ready { + condStatus = corev1.ConditionFalse + } + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "test", + Labels: map[string]string{"revision": strconv.Itoa(revision)}, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ + { + Status: condStatus, + Type: corev1.PodReady, + }, + }, + Phase: phase, + }, + } + } + + tests := []struct { + name string + nodeStatuses []operatorv1.NodeStatus + staticPods []*corev1.Pod + latestAvailableRevision int32 + expectedUpgradeOrder []int + expectedSyncError []bool + updateStatusErrors []error + numOfInstallersOOM int + }{ + { + name: "three fresh nodes", + latestAvailableRevision: 1, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-0", + }, + { + NodeName: "test-node-1", + }, + { + NodeName: "test-node-2", + }, + }, + expectedUpgradeOrder: []int{0, 1, 2}, + }, + { + name: "three nodes with current revision, all static pods ready", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-0", + CurrentRevision: 1, + }, + { + NodeName: "test-node-1", + CurrentRevision: 1, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{0, 1, 2}, + }, + { + name: "one node already transitioning", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-0", + CurrentRevision: 1, + }, + { + NodeName: "test-node-1", + CurrentRevision: 1, + TargetRevision: 2, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{1, 0, 2}, + }, + { + name: "one node already transitioning, although it is newer", + latestAvailableRevision: 3, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-0", + CurrentRevision: 1, + }, + { + NodeName: "test-node-1", + CurrentRevision: 2, + TargetRevision: 3, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{1, 0, 2}, + }, + { + name: "three nodes, 2 not updated, one with failure in last revision", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-0", + CurrentRevision: 1, + }, + { + NodeName: "test-node-1", + CurrentRevision: 1, + LastFailedRevision: 2, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + 
newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{}, + }, + { + name: "three nodes, 2 not updated, one with failure in old revision", + latestAvailableRevision: 3, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-0", + CurrentRevision: 2, + }, + { + NodeName: "test-node-1", + CurrentRevision: 2, + LastFailedRevision: 1, + }, + { + NodeName: "test-node-2", + CurrentRevision: 2, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{0, 1, 2}, + }, + { + name: "three nodes with outdated current revision, second static pods unready", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{1, 0, 2}, + }, + { + name: "four nodes with outdated current revision, installer of 2nd was OOM killed, two more OOM happen, then success", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 2, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + // we call sync 2*3 times: + // 1. notice update of node 1 + // 2. create installer for node 1, OOM, fall-through, notice update of node 1 + // 3. create installer for node 1, OOM, fall-through, notice update of node 1 + // 4. create installer for node 1, which succeeds, set CurrentRevision + // 5. notice update of node 2 + // 6. 
create installer for node 2, which succeeds, set CurrentRevision + expectedUpgradeOrder: []int{1, 1, 1, 2}, + numOfInstallersOOM: 2, + }, + { + name: "three nodes with outdated current revision, 2nd & 3rd static pods unready", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, false), + }, + expectedUpgradeOrder: []int{1, 2, 0}, + }, + { + name: "updated node unready and newer version available, but updated again before older nodes are touched", + latestAvailableRevision: 3, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + }, + { + NodeName: "test-node-2", + CurrentRevision: 2, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{1, 0, 2}, + }, + { + name: "two nodes on revision 1 and one node on revision 4", + latestAvailableRevision: 5, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 4, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 4, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), + }, + expectedUpgradeOrder: []int{1, 2, 0}, + }, + { + name: "two nodes 2 revisions behind and 1 node on latest available revision", + latestAvailableRevision: 3, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 3, + }, + { + NodeName: "test-node-2", + CurrentRevision: 1, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodSucceeded, true), + }, + expectedUpgradeOrder: []int{1, 2}, + }, + { + name: "two nodes at different revisions behind and 1 node on latest available revision", + latestAvailableRevision: 3, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 3, + }, + { + NodeName: "test-node-2", + CurrentRevision: 2, + }, + { + NodeName: "test-node-3", + CurrentRevision: 1, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, 
corev1.PodSucceeded, true), + }, + expectedUpgradeOrder: []int{2, 1}, + }, + { + name: "second node with older static pod than current revision", + latestAvailableRevision: 3, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 2, + }, + { + NodeName: "test-node-2", + CurrentRevision: 2, + }, + { + NodeName: "test-node-3", + CurrentRevision: 2, + }, + }, + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, corev1.PodRunning, false), + }, + expectedUpgradeOrder: []int{1, 2, 0}, + }, + { + name: "first update status fails", + latestAvailableRevision: 2, + nodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + }, + }, + expectedUpgradeOrder: []int{0}, + updateStatusErrors: []error{errors.NewInternalError(fmt.Errorf("unknown"))}, + expectedSyncError: []bool{true}, + }, + } + + for i, test := range tests { + t.Run(test.name, func(t *testing.T) { + createdInstallerPods := []*corev1.Pod{} + installerPods := map[string]*corev1.Pod{} + updatedStaticPods := map[string]*corev1.Pod{} + + namespace := fmt.Sprintf("test-%d", i) + + installerNodeAndID := func(installerName string) (string, int) { + ss := strings.SplitN(strings.TrimPrefix(installerName, "installer-"), "-", 2) + id, err := strconv.Atoi(ss[0]) + if err != nil { + t.Fatalf("unexpected id derived from install pod name %q: %v", installerName, err) + } + return ss[1], id + } + + kubeClient := fake.NewSimpleClientset( + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-secret"}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-config"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: fmt.Sprintf("%s-%d", "test-secret", test.latestAvailableRevision)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: fmt.Sprintf("%s-%d", "test-config", test.latestAvailableRevision)}}, + ) + kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + createdPod := action.(ktesting.CreateAction).GetObject().(*corev1.Pod) + createdInstallerPods = append(createdInstallerPods, createdPod) + if _, found := installerPods[createdPod.Name]; found { + return false, nil, errors.NewAlreadyExists(corev1.SchemeGroupVersion.WithResource("pods").GroupResource(), createdPod.Name) + } + installerPods[createdPod.Name] = createdPod + if test.numOfInstallersOOM > 0 { + test.numOfInstallersOOM-- + + createdPod.Status.Phase = corev1.PodFailed + createdPod.Status.ContainerStatuses = []corev1.ContainerStatus{ + { + Name: "container", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "OOMKilled", + Message: "killed by OOM", + }, + }, + Ready: false, + }, + } + } else { + // Once the installer pod is created, set its status to succeeded. + // Note that in reality this would probably take a couple of sync cycles to happen, but it is useful to do it fast + // to rule out timing bugs. 
+ createdPod.Status.Phase = corev1.PodSucceeded + + nodeName, id := installerNodeAndID(createdPod.Name) + staticPodName := mirrorPodNameForNode("test-pod", nodeName) + + updatedStaticPods[staticPodName] = newStaticPod(staticPodName, id, corev1.PodRunning, true) + } + + return true, nil, nil + }) + + // When newNodeStateForInstallInProgress asks for a pod, give it one that has already succeeded. + kubeClient.PrependReactor("get", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + podName := action.(ktesting.GetAction).GetName() + if pod, found := installerPods[podName]; found { + return true, pod, nil + } + if pod, exists := updatedStaticPods[podName]; exists { + if pod == nil { + return false, nil, nil + } + return true, pod, nil + } + for _, pod := range test.staticPods { + if pod.Name == podName { + return true, pod, nil + } + } + return false, nil, nil + }) + kubeClient.PrependReactor("delete", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + podName := action.(ktesting.GetAction).GetName() + if pod, found := installerPods[podName]; found { + delete(installerPods, podName) + return true, pod, nil + } + return false, nil, nil + }) + + kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test-"+test.name)) + statusUpdateCount := 0 + statusUpdateErrorFunc := func(rv string, status *operatorv1.StaticPodOperatorStatus) error { + var err error + if statusUpdateCount < len(test.updateStatusErrors) { + err = test.updateStatusErrors[statusUpdateCount] + } + statusUpdateCount++ + return err + } + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: test.latestAvailableRevision, + NodeStatuses: test.nodeStatuses, + }, + statusUpdateErrorFunc, + nil, + ) + + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + c := NewInstallerController( + namespace, "test-pod", + []revision.RevisionResource{{Name: "test-config"}}, + []revision.RevisionResource{{Name: "test-secret"}}, + []string{"/bin/true"}, + kubeInformers, + fakeStaticPodOperatorClient, + kubeClient.CoreV1(), + kubeClient.CoreV1(), + kubeClient.CoreV1(), + eventRecorder, + ) + c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { + return []metav1.OwnerReference{}, nil + } + c.installerPodImageFn = func() string { return "docker.io/foo/bar" } + + // Each node needs at least 2 syncs to first create the pod and then acknowledge its existence. 
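+ // Hence the loop bound below: two syncs per node, plus one extra sync to observe the final, settled state.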
+ for i := 1; i <= len(test.nodeStatuses)*2+1; i++ { + err := c.sync() + expectedErr := false + if i-1 < len(test.expectedSyncError) && test.expectedSyncError[i-1] { + expectedErr = true + } + if err != nil && !expectedErr { + t.Errorf("failed to execute %d sync: %v", i, err) + } else if err == nil && expectedErr { + t.Errorf("expected sync error in sync %d, but got nil", i) + } + } + + for i := range test.expectedUpgradeOrder { + if i >= len(createdInstallerPods) { + t.Fatalf("expected more (got only %d) installer pods in the node order %v", len(createdInstallerPods), test.expectedUpgradeOrder[i:]) + } + + nodeName, _ := installerNodeAndID(createdInstallerPods[i].Name) + if expected, got := test.nodeStatuses[test.expectedUpgradeOrder[i]].NodeName, nodeName; expected != got { + t.Errorf("expected installer pod number %d to be for node %q, but got %q", i, expected, got) + } + } + if len(test.expectedUpgradeOrder) < len(createdInstallerPods) { + t.Errorf("too many installer pods created, expected %d, got %d", len(test.expectedUpgradeOrder), len(createdInstallerPods)) + } + }) + } + +} + +func TestInstallerController_manageInstallationPods(t *testing.T) { + type fields struct { + targetNamespace string + staticPodName string + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + command []string + operatorConfigClient v1helpers.StaticPodOperatorClient + kubeClient kubernetes.Interface + eventRecorder events.Recorder + queue workqueue.RateLimitingInterface + installerPodImageFn func() string + } + type args struct { + operatorSpec *operatorv1.StaticPodOperatorSpec + originalOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + } + tests := []struct { + name string + fields fields + args args + want bool + wantErr bool + }{ + // TODO: Add test cases. 
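+ // An illustrative (hypothetical) entry would look like: + // {name: "no work to do", fields: fields{...}, args: args{...}, want: false, wantErr: false},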
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &InstallerController{ + targetNamespace: tt.fields.targetNamespace, + staticPodName: tt.fields.staticPodName, + configMaps: tt.fields.configMaps, + secrets: tt.fields.secrets, + command: tt.fields.command, + operatorClient: tt.fields.operatorConfigClient, + configMapsGetter: tt.fields.kubeClient.CoreV1(), + podsGetter: tt.fields.kubeClient.CoreV1(), + eventRecorder: tt.fields.eventRecorder, + queue: tt.fields.queue, + installerPodImageFn: tt.fields.installerPodImageFn, + } + got, err := c.manageInstallationPods(tt.args.operatorSpec, tt.args.originalOperatorStatus, tt.args.resourceVersion) + if (err != nil) != tt.wantErr { + t.Errorf("InstallerController.manageInstallationPods() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("InstallerController.manageInstallationPods() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNodeToStartRevisionWith(t *testing.T) { + type StaticPod struct { + name string + state staticPodState + revision int32 + } + type Test struct { + name string + nodes []operatorv1.NodeStatus + pods []StaticPod + expected int + expectedErr bool + } + + newNode := func(name string, current, target int32) operatorv1.NodeStatus { + return operatorv1.NodeStatus{NodeName: name, CurrentRevision: current, TargetRevision: target} + } + + for _, test := range []Test{ + { + name: "empty", + expectedErr: true, + }, + { + name: "no pods", + pods: nil, + nodes: []operatorv1.NodeStatus{ + newNode("a", 0, 0), + newNode("b", 0, 0), + newNode("c", 0, 0), + }, + expected: 0, + }, + { + name: "all ready", + pods: []StaticPod{ + {"a", staticPodStateReady, 1}, + {"b", staticPodStateReady, 1}, + {"c", staticPodStateReady, 1}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 1, 0), + newNode("b", 1, 0), + newNode("c", 1, 0), + }, + expected: 0, + }, + { + name: "one failed", + pods: []StaticPod{ + {"a", staticPodStateReady, 1}, + {"b", staticPodStateReady, 1}, + {"c", staticPodStateFailed, 1}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 1, 0), + newNode("b", 1, 0), + newNode("c", 1, 0), + }, + expected: 2, + }, + { + name: "one pending", + pods: []StaticPod{ + {"a", staticPodStateReady, 1}, + {"b", staticPodStateReady, 1}, + {"c", staticPodStatePending, 1}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 1, 0), + newNode("b", 1, 0), + newNode("c", 0, 0), + }, + expected: 2, + }, + { + name: "multiple pending", + pods: []StaticPod{ + {"a", staticPodStateReady, 1}, + {"b", staticPodStatePending, 1}, + {"c", staticPodStatePending, 1}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 1, 0), + newNode("b", 0, 0), + newNode("c", 0, 0), + }, + expected: 1, + }, + { + name: "one updating", + pods: []StaticPod{ + {"a", staticPodStateReady, 1}, + {"b", staticPodStatePending, 0}, + {"c", staticPodStateReady, 0}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 1, 0), + newNode("b", 0, 1), + newNode("c", 0, 0), + }, + expected: 1, + }, + { + name: "pods missing", + pods: []StaticPod{ + {"a", staticPodStateReady, 1}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 1, 0), + newNode("b", 0, 0), + newNode("c", 0, 0), + }, + expected: 1, + }, + { + name: "one old", + pods: []StaticPod{ + {"a", staticPodStateReady, 2}, + {"b", staticPodStateReady, 1}, + {"c", staticPodStateReady, 2}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 2, 0), + newNode("b", 2, 0), + newNode("c", 2, 0), + }, + expected: 1, + }, + { + name: "one behind, but as stated", + 
pods: []StaticPod{ + {"a", staticPodStateReady, 2}, + {"b", staticPodStateReady, 1}, + {"c", staticPodStateReady, 2}, + }, + nodes: []operatorv1.NodeStatus{ + newNode("a", 2, 0), + newNode("b", 1, 0), + newNode("c", 2, 0), + }, + expected: 1, + }, + } { + t.Run(test.name, func(t *testing.T) { + fakeGetStaticPodState := func(nodeName string) (state staticPodState, revision, reason string, errs []string, err error) { + for _, p := range test.pods { + if p.name == nodeName { + return p.state, strconv.Itoa(int(p.revision)), "", nil, nil + } + } + return staticPodStatePending, "", "", nil, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, nodeName) + } + i, _, err := nodeToStartRevisionWith(fakeGetStaticPodState, test.nodes) + if err == nil && test.expectedErr { + t.Fatalf("expected error, got none") + } + if err != nil && !test.expectedErr { + t.Fatalf("unexpected error: %v", err) + } + if i != test.expected { + t.Errorf("expected node ID %d, got %d", test.expected, i) + } + }) + } +} + +func TestSetConditions(t *testing.T) { + + type TestCase struct { + name string + latestAvailableRevision int32 + lastFailedRevision int32 + currentRevisions []int32 + expectedAvailableStatus operatorv1.ConditionStatus + expectedProgressingStatus operatorv1.ConditionStatus + expectedFailingStatus operatorv1.ConditionStatus + } + + testCase := func(name string, available, progressing, failed bool, lastFailedRevision, latest int32, current ...int32) TestCase { + availableStatus := operatorv1.ConditionFalse + pendingStatus := operatorv1.ConditionFalse + expectedFailingStatus := operatorv1.ConditionFalse + if available { + availableStatus = operatorv1.ConditionTrue + } + if progressing { + pendingStatus = operatorv1.ConditionTrue + } + if failed { + expectedFailingStatus = operatorv1.ConditionTrue + } + return TestCase{name, latest, lastFailedRevision, current, availableStatus, pendingStatus, expectedFailingStatus} + } + + testCases := []TestCase{ + testCase("AvailableProgressingDegraded", true, true, true, 1, 2, 2, 1, 2, 1), + testCase("AvailableProgressing", true, true, false, 0, 2, 2, 1, 2, 1), + testCase("AvailableNotProgressing", true, false, false, 0, 2, 2, 2, 2), + testCase("NotAvailableProgressing", false, true, false, 0, 2, 0, 0), + testCase("NotAvailableAtOldLevelProgressing", true, true, false, 0, 2, 1, 1), + testCase("NotAvailableNotProgressing", false, false, false, 0, 2), + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + status := &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: tc.latestAvailableRevision, + } + for _, current := range tc.currentRevisions { + status.NodeStatuses = append(status.NodeStatuses, operatorv1.NodeStatus{CurrentRevision: current, LastFailedRevision: tc.lastFailedRevision}) + } + setAvailableProgressingNodeInstallerFailingConditions(status) + + availableCondition := v1helpers.FindOperatorCondition(status.Conditions, operatorv1.OperatorStatusTypeAvailable) + if availableCondition == nil { + t.Error("Available condition: not found") + } else if availableCondition.Status != tc.expectedAvailableStatus { + t.Errorf("Available condition: expected status %v, actual status %v", tc.expectedAvailableStatus, availableCondition.Status) + } + + pendingCondition := v1helpers.FindOperatorCondition(status.Conditions, operatorv1.OperatorStatusTypeProgressing) + if pendingCondition == nil { + t.Error("Progressing condition: not found") + } else if pendingCondition.Status != tc.expectedProgressingStatus { + t.Errorf("Progressing condition: 
expected status %v, actual status %v", tc.expectedProgressingStatus, pendingCondition.Status) + } + + failingCondition := v1helpers.FindOperatorCondition(status.Conditions, condition.NodeInstallerDegradedConditionType) + if failingCondition == nil { + t.Error("Failing condition: not found") + } else if failingCondition.Status != tc.expectedFailingStatus { + t.Errorf("Failing condition: expected status %v, actual status %v", tc.expectedFailingStatus, failingCondition.Status) + } + }) + } + +} + +func TestEnsureRequiredResources(t *testing.T) { + tests := []struct { + name string + certConfigMaps []revision.RevisionResource + certSecrets []revision.RevisionResource + + revisionNumber int32 + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + + startingResources []runtime.Object + expectedErr string + }{ + { + name: "none", + }, + { + name: "skip-optional", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm", Optional: true}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s", Optional: true}, + }, + }, + { + name: "wait-required", + configMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + secrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + expectedErr: "missing required resources: [configmaps: foo-cm-0, secrets: foo-s-0]", + }, + { + name: "found-required", + configMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + secrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + startingResources: []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-cm-0"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-s-0"}}, + }, + }, + { + name: "wait-required-certs", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + expectedErr: "missing required resources: [configmaps: foo-cm, secrets: foo-s]", + }, + { + name: "found-required-certs", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + startingResources: []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-cm"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-s"}}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client := fake.NewSimpleClientset(test.startingResources...) 
+ c := &InstallerController{ + targetNamespace: "ns", + certConfigMaps: test.certConfigMaps, + certSecrets: test.certSecrets, + configMaps: test.configMaps, + secrets: test.secrets, + eventRecorder: eventstesting.NewTestingEventRecorder(t), + + configMapsGetter: client.CoreV1(), + secretsGetter: client.CoreV1(), + } + + actual := c.ensureRequiredResourcesExist(test.revisionNumber) + switch { + case len(test.expectedErr) == 0 && actual == nil: + case len(test.expectedErr) == 0 && actual != nil: + t.Fatal(actual) + case len(test.expectedErr) != 0 && actual == nil: + t.Fatal(actual) + case len(test.expectedErr) != 0 && actual != nil && !strings.Contains(actual.Error(), test.expectedErr): + t.Fatalf("actual error: %q does not match expected: %q", actual.Error(), test.expectedErr) + } + + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go new file mode 100644 index 00000000000..87256fe20ad --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go @@ -0,0 +1,187 @@ +package installer + +import ( + "reflect" + "sort" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]sets.Empty + +// NewInt32 creates an Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates an Int32 from the keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) { + for _, item := range items { + s[item] = sets.Empty{} + } +} + +// Delete removes the given items from the set. +func (s Int32) Delete(items ...int32) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result } + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the items in both s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// PopAny removes and returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. 
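+// For example, NewInt32(1, 2, 2).Len() == 2, since duplicate values collapse on Insert.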
+func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml new file mode 100644 index 00000000000..c8453c002ab --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/manifests/installer-pod.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: installer +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: installer + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + resources: + requests: + memory: 100M + limits: + memory: 100M + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go new file mode 100644 index 00000000000..4c2993d8d13 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go @@ -0,0 +1,255 @@ +package installerstate + +import ( + "context" + "fmt" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const installerStateControllerWorkQueueKey = "key" + +// maxToleratedPodPendingDuration is the maximum time we tolerate installer pod in pending state +var maxToleratedPodPendingDuration = 5 * time.Minute + +type InstallerStateController struct { + podsGetter corev1client.PodsGetter + eventsGetter corev1client.EventsGetter + queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + targetNamespace string + operatorClient v1helpers.StaticPodOperatorClient + eventRecorder events.Recorder + + timeNowFn func() time.Time +} + +func NewInstallerStateController(kubeInformersForTargetNamespace informers.SharedInformerFactory, + podsGetter corev1client.PodsGetter, + eventsGetter corev1client.EventsGetter, + operatorClient v1helpers.StaticPodOperatorClient, + targetNamespace string, + recorder events.Recorder, +) *InstallerStateController { + c := &InstallerStateController{ + podsGetter: podsGetter, + eventsGetter: eventsGetter, + targetNamespace: 
targetNamespace,
+		operatorClient:  operatorClient,
+		eventRecorder:   recorder.WithComponentSuffix("installer-state-controller"),
+		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerStateController"),
+		timeNowFn:       time.Now,
+	}
+
+	c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced)
+	kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler())
+
+	return c
+}
+
+func (c *InstallerStateController) eventHandler() cache.ResourceEventHandler {
+	return cache.ResourceEventHandlerFuncs{
+		AddFunc:    func(obj interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) },
+		UpdateFunc: func(old, new interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) },
+		DeleteFunc: func(obj interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) },
+	}
+}
+
+// degradedConditionNames lists all supported condition types.
+var degradedConditionNames = []string{
+	"InstallerPodPendingDegraded",
+	"InstallerPodContainerWaitingDegraded",
+	"InstallerPodNetworkingDegraded",
+}
+
+func (c *InstallerStateController) sync() error {
+	pods, err := c.podsGetter.Pods(c.targetNamespace).List(metav1.ListOptions{
+		LabelSelector: labels.SelectorFromSet(labels.Set{"app": "installer"}).String(),
+	})
+	if err != nil {
+		return err
+	}
+
+	// collect all pods that have been in the pending state for longer than maxToleratedPodPendingDuration
+	pendingPods := []*v1.Pod{}
+	for _, pod := range pods.Items {
+		if pod.Status.Phase != v1.PodPending || pod.Status.StartTime == nil {
+			continue
+		}
+		if c.timeNowFn().Sub(pod.Status.StartTime.Time) >= maxToleratedPodPendingDuration {
+			pendingPods = append(pendingPods, pod.DeepCopy())
+		}
+	}
+
+	// in theory, there should never be two installer pods pending at once, as we don't roll a new installer pod
+	// until the previous/existing pod has finished its job.
+	foundConditions := []operatorv1.OperatorCondition{}
+	foundConditions = append(foundConditions, c.handlePendingInstallerPods(pendingPods)...)
+
+	// handle networking conditions that are based on events
+	networkConditions, err := c.handlePendingInstallerPodsNetworkEvents(pendingPods)
+	if err != nil {
+		return err
+	}
+	foundConditions = append(foundConditions, networkConditions...)
+
+	updateConditionFuncs := []v1helpers.UpdateStaticPodStatusFunc{}
+
+	// for each supported degraded condition type, check whether any pending pod matched it.
+	for _, degradedConditionName := range degradedConditionNames {
+		// default the condition to False; if the checks above reported it, use the found condition instead
+		updatedCondition := operatorv1.OperatorCondition{
+			Type:   degradedConditionName,
+			Status: operatorv1.ConditionFalse,
+		}
+		if condition := v1helpers.FindOperatorCondition(foundConditions, degradedConditionName); condition != nil {
+			updatedCondition = *condition
+		}
+		updateConditionFuncs = append(updateConditionFuncs, v1helpers.UpdateStaticPodConditionFn(updatedCondition))
+	}
+
+	if _, _, err := v1helpers.UpdateStaticPodStatus(c.operatorClient, updateConditionFuncs...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *InstallerStateController) handlePendingInstallerPodsNetworkEvents(pods []*v1.Pod) ([]operatorv1.OperatorCondition, error) {
+	conditions := []operatorv1.OperatorCondition{}
+	if len(pods) == 0 {
+		return conditions, nil
+	}
+	namespaceEvents, err := c.eventsGetter.Events(c.targetNamespace).List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	for _, event := range namespaceEvents.Items {
+		if event.InvolvedObject.Kind != "Pod" {
+			continue
+		}
+		if !strings.Contains(event.Message, "failed to create pod network") {
+			continue
+		}
+		for _, pod := range pods {
+			if pod.Name != event.InvolvedObject.Name {
+				continue
+			}
+			// If we already found a pod that is pending because of a networking problem, skip the remaining pods.
+			// This reduces the number of events we fire.
+			if v1helpers.FindOperatorCondition(conditions, "InstallerPodNetworkingDegraded") != nil {
+				break
+			}
+			condition := operatorv1.OperatorCondition{
+				Type:    "InstallerPodNetworkingDegraded",
+				Status:  operatorv1.ConditionTrue,
+				Reason:  event.Reason,
+				Message: fmt.Sprintf("Pod %q on node %q observed degraded networking: %s", pod.Name, pod.Spec.NodeName, event.Message),
+			}
+			conditions = append(conditions, condition)
+			c.eventRecorder.Warningf(condition.Reason, condition.Message)
+		}
+	}
+	return conditions, nil
+}
+
+func (c *InstallerStateController) handlePendingInstallerPods(pods []*v1.Pod) []operatorv1.OperatorCondition {
+	conditions := []operatorv1.OperatorCondition{}
+	for _, pod := range pods {
+		// at this point we already know the pod is pending for longer than expected
+		pendingTime := c.timeNowFn().Sub(pod.Status.StartTime.Time)
+
+		// the pod has been in the pending state for longer than maxToleratedPodPendingDuration; report the reason
+		// and message as a degraded condition for the operator.
+		if len(pod.Status.Reason) > 0 {
+			condition := operatorv1.OperatorCondition{
+				Type:    "InstallerPodPendingDegraded",
+				Reason:  pod.Status.Reason,
+				Status:  operatorv1.ConditionTrue,
+				Message: fmt.Sprintf("Pod %q on node %q is Pending for %s because %s", pod.Name, pod.Spec.NodeName, pendingTime, pod.Status.Message),
+			}
+			conditions = append(conditions, condition)
+			c.eventRecorder.Warningf(condition.Reason, condition.Message)
+		}
+
+		// one or more containers have been in the waiting state for longer than maxToleratedPodPendingDuration;
+		// report the reason and message as a degraded condition for the operator.
+		for _, containerStatus := range pod.Status.ContainerStatuses {
+			if containerStatus.State.Waiting == nil {
+				continue
+			}
+			if state := containerStatus.State.Waiting; len(state.Reason) > 0 {
+				condition := operatorv1.OperatorCondition{
+					Type:    "InstallerPodContainerWaitingDegraded",
+					Reason:  state.Reason,
+					Status:  operatorv1.ConditionTrue,
+					Message: fmt.Sprintf("Pod %q on node %q container %q is waiting for %s because %s", pod.Name, pod.Spec.NodeName, containerStatus.Name, pendingTime, state.Message),
+				}
+				conditions = append(conditions, condition)
+				c.eventRecorder.Warningf(condition.Reason, condition.Message)
+			}
+		}
+	}
+
+	return conditions
+}
+
+// Run starts the controller and blocks until the context is cancelled.
+func (c *InstallerStateController) Run(ctx context.Context, workers int) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()

+	klog.Infof("Starting InstallerStateController")
+	defer klog.Infof("Shutting down InstallerStateController")
+	if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
+
+	// add time based trigger
+	go wait.UntilWithContext(ctx, func(context.Context) { c.queue.Add(installerStateControllerWorkQueueKey) }, time.Minute)
+
+	<-ctx.Done()
+}
+
+func (c *InstallerStateController) runWorker(ctx context.Context) {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *InstallerStateController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go
new file mode 100644
index 00000000000..c6e39856ffc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go
@@ -0,0 +1,177 @@
+package installerstate
+
+import (
+	"testing"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apiserver/pkg/storage/names"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/operator/events/eventstesting"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+func newInstallerPod(name string, mutateStatusFn func(*corev1.PodStatus)) *corev1.Pod {
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "test",
+			Labels:    map[string]string{"app": "installer"},
+		},
+		Spec:   corev1.PodSpec{},
+		Status: corev1.PodStatus{},
+	}
+	mutateStatusFn(&pod.Status)
+	return pod
+}
+
+func newInstallerPodNetworkEvent(mutateFn func(*corev1.Event)) *corev1.Event {
+	event := &corev1.Event{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      names.SimpleNameGenerator.GenerateName("test"),
+			Namespace: "test",
+		},
+		InvolvedObject: corev1.ObjectReference{
+			Kind: "Pod",
+			Name: "installer-1",
+		},
+		Reason: "FailedCreatePodSandBox",
+		Message: `'(combined from similar events): Failed create pod sandbox: rpc error:
+      code = Unknown desc = failed to create pod network sandbox k8s_installer-5-control-plane-1_openshift-kube-apiserver_900db7f3-d2ce-11e9-8fc8-005056be0641_0(121698f4862fd67157ca586cab18aefb048fe5d7b3bd87516098ac0e91a90a13):
+      Multus: Err adding pod to network "openshift-sdn": Multus: error in invoke Delegate
+      add - "openshift-sdn": failed to send CNI request: Post http://dummy/: dial unix
+      /var/run/openshift-sdn/cniserver/socket: connect: connection refused'`,
+	}
+	if mutateFn != nil {
+		mutateFn(event)
+	}
+	return event
+}
+
+func TestInstallerStateController(t *testing.T) {
+	tests := []struct {
+		name            string
+		startingObjects []runtime.Object
+		evalConditions  func(t *testing.T, conditions []operatorv1.OperatorCondition)
+	}{
+		{
+			name: "should report pending pod",
+			startingObjects: []runtime.Object{
+				newInstallerPod("installer-1", func(status *corev1.PodStatus) {
+					status.Phase = corev1.PodPending
+					status.Reason = "PendingReason"
+					status.Message = "PendingMessage"
+					status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))}
+				}),
+			},
+			evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) {
+				podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded")
+				if podPendingCondition.Status != operatorv1.ConditionTrue {
+					t.Errorf("expected InstallerPodPendingDegraded condition to be True")
+				}
+				podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded")
+				if podContainerWaitingCondition.Status != operatorv1.ConditionFalse {
+					t.Errorf("expected InstallerPodContainerWaitingDegraded condition to be False")
+				}
+			},
+		},
+		{
+			name: "should report pod with failing networking",
+			startingObjects: []runtime.Object{
+				newInstallerPod("installer-1", func(status *corev1.PodStatus) {
+					status.Phase = corev1.PodPending
+					status.Reason = "PendingReason"
+					status.Message = "PendingMessage"
+					status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))}
+				}),
+				newInstallerPodNetworkEvent(nil),
+				newInstallerPodNetworkEvent(nil),
+				newInstallerPodNetworkEvent(nil),
+			},
+			evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) {
+				podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodNetworkingDegraded")
+				if podPendingCondition.Status != operatorv1.ConditionTrue {
+					t.Errorf("expected InstallerPodNetworkingDegraded condition to be True")
+				}
+			},
+		},
+		{
+			name: "should report pending pod with waiting container",
+			startingObjects: []runtime.Object{
+				newInstallerPod("installer-1", func(status *corev1.PodStatus) {
+					status.Phase = corev1.PodPending
+					status.Reason = "PendingReason"
+					status.Message = "PendingMessage"
+					status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))}
+					status.ContainerStatuses = append(status.ContainerStatuses, corev1.ContainerStatus{Name: "test", State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{
+						Reason:  "PodInitializing",
+						Message: "initializing error",
+					}}})
+				}),
+			},
+			evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) {
+				podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded")
+				if podPendingCondition.Status != operatorv1.ConditionTrue {
+					t.Errorf("expected InstallerPodPendingDegraded condition to be True")
+				}
+				podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded")
+				if podContainerWaitingCondition.Status != operatorv1.ConditionTrue {
+					t.Errorf("expected InstallerPodContainerWaitingDegraded condition to be True")
+				}
+			},
+		},
+		{
+			name: "should report false when no pending pods",
+			startingObjects: []runtime.Object{
+				newInstallerPod("installer-1", func(status *corev1.PodStatus) {
+					status.Phase = corev1.PodRunning
+					status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))}
+				}),
+			},
+			evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) {
+				podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded")
+				if podPendingCondition.Status != operatorv1.ConditionFalse {
+					t.Errorf("expected InstallerPodPendingDegraded condition to be False")
+				}
+				podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded")
+				if podContainerWaitingCondition.Status != operatorv1.ConditionFalse {
+					t.Errorf("expected InstallerPodContainerWaitingDegraded condition to be False")
+				}
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			kubeClient := fake.NewSimpleClientset(tt.startingObjects...)
+			kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test"))
+			stopCh := make(chan struct{})
+			go kubeInformers.Start(stopCh)
+			defer close(stopCh)
+
+			fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient(&operatorv1.StaticPodOperatorSpec{}, &operatorv1.StaticPodOperatorStatus{}, nil, nil)
+			eventRecorder := eventstesting.NewTestingEventRecorder(t)
+			controller := NewInstallerStateController(kubeInformers, kubeClient.CoreV1(), kubeClient.CoreV1(), fakeStaticPodOperatorClient, "test", eventRecorder)
+			if err := controller.sync(); err != nil {
+				t.Error(err)
+				return
+			}
+
+			_, status, _, err := fakeStaticPodOperatorClient.GetOperatorState()
+			if err != nil {
+				t.Error(err)
+				return
+			}
+			if tt.evalConditions != nil {
+				tt.evalConditions(t, status.Conditions)
+			}
+		})
+	}
+
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go
new file mode 100644
index 00000000000..70489277cd9
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata/bindata.go
@@ -0,0 +1,314 @@
+// Code generated by go-bindata.
+// sources:
+// pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml
+// pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml
+// pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml
+// DO NOT EDIT!
+ +package bindata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring`) + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml, nil +} + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + # TODO this should be a clusterrole + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch`) + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml, nil +} + +func pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml = []byte(`apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: monitor + namespace: {{ .TargetNamespace }} +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + metricRelabelings: + - action: drop + regex: etcd_(debugging|disk|request|server).* + sourceLabels: + - __name__ + port: https + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + serverName: apiserver.{{ .TargetNamespace }}.svc + jobLabel: component + namespaceSelector: + matchNames: + - {{ .TargetNamespace }} + selector: + matchLabels: + app: {{ .TargetNamespace }}`) + +func 
pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml, nil +} + +func pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml": pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml, + "pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml": pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml, + "pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml": pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "pkg": {nil, map[string]*bintree{ + "operator": {nil, map[string]*bintree{ + "staticpod": {nil, map[string]*bintree{ + "controller": {nil, map[string]*bintree{ + "monitoring": {nil, map[string]*bintree{ + "manifests": {nil, map[string]*bintree{ + "prometheus-role-binding.yaml": {pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleBindingYaml, map[string]*bintree{}}, + "prometheus-role.yaml": {pkgOperatorStaticpodControllerMonitoringManifestsPrometheusRoleYaml, map[string]*bintree{}}, + "service-monitor.yaml": {pkgOperatorStaticpodControllerMonitoringManifestsServiceMonitorYaml, map[string]*bintree{}}, + }}, + }}, + }}, + }}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml new file mode 100644 index 00000000000..2b3289912f1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: prometheus-k8s +subjects: + - kind: ServiceAccount + name: prometheus-k8s + namespace: openshift-monitoring \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml new file mode 100644 index 00000000000..55957ab8e34 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/prometheus-role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + # TODO this should be a clusterrole + name: prometheus-k8s + namespace: {{ .TargetNamespace }} +rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - pods + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml new file mode 100644 index 00000000000..17f93e2c66c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/manifests/service-monitor.yaml @@ -0,0 +1,26 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: monitor + namespace: {{ .TargetNamespace }} +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + interval: 30s + metricRelabelings: + - action: drop + regex: etcd_(debugging|disk|request|server).* + sourceLabels: + - __name__ + port: https + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + serverName: apiserver.{{ .TargetNamespace }}.svc + jobLabel: component + namespaceSelector: + matchNames: + - {{ .TargetNamespace }} + selector: + matchLabels: + app: {{ .TargetNamespace }} \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go new file mode 100644 index 00000000000..62a069c3d54 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go @@ -0,0 +1,209 @@ +package monitoring + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + rbaclisterv1 "k8s.io/client-go/listers/rbac/v1" + 
"k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const ( + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/monitoring" +) + +var syntheticRequeueError = fmt.Errorf("synthetic requeue request") + +type MonitoringResourceController struct { + targetNamespace string + serviceMonitorName string + + clusterRoleBindingLister rbaclisterv1.ClusterRoleBindingLister + kubeClient kubernetes.Interface + dynamicClient dynamic.Interface + operatorClient v1helpers.StaticPodOperatorClient + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewMonitoringResourceController creates a new backing resource controller. +func NewMonitoringResourceController( + targetNamespace string, + serviceMonitorName string, + operatorClient v1helpers.StaticPodOperatorClient, + kubeInformersForTargetNamespace informers.SharedInformerFactory, + kubeClient kubernetes.Interface, + dynamicClient dynamic.Interface, + eventRecorder events.Recorder, +) *MonitoringResourceController { + c := &MonitoringResourceController{ + targetNamespace: targetNamespace, + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("monitoring-resource-controller"), + serviceMonitorName: serviceMonitorName, + + clusterRoleBindingLister: kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Lister(), + cachesToSync: []cache.InformerSynced{ + kubeInformersForTargetNamespace.Core().V1().ServiceAccounts().Informer().HasSynced, + operatorClient.Informer().HasSynced, + }, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "MonitoringResourceController"), + kubeClient: kubeClient, + dynamicClient: dynamicClient, + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + // TODO: We need a dynamic informer here to observe changes to ServiceMonitor resource. 
+	kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().AddEventHandler(c.eventHandler())
+
+	c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced)
+	c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Rbac().V1().ClusterRoleBindings().Informer().HasSynced)
+
+	return c
+}
+
+func (c MonitoringResourceController) mustTemplateAsset(name string) ([]byte, error) {
+	config := struct {
+		TargetNamespace string
+	}{
+		TargetNamespace: c.targetNamespace,
+	}
+	return assets.MustCreateAssetFromTemplate(name, bindata.MustAsset(filepath.Join(manifestDir, name)), config).Data, nil
+}
+
+func (c MonitoringResourceController) sync() error {
+	operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState()
+	if err != nil {
+		return err
+	}
+
+	if !management.IsOperatorManaged(operatorSpec.ManagementState) {
+		return nil
+	}
+
+	directResourceResults := resourceapply.ApplyDirectly(c.kubeClient, c.eventRecorder, c.mustTemplateAsset,
+		"manifests/prometheus-role.yaml",
+		"manifests/prometheus-role-binding.yaml",
+	)
+
+	errs := []error{}
+	for _, currResult := range directResourceResults {
+		if currResult.Error != nil {
+			errs = append(errs, fmt.Errorf("%q (%T): %v", currResult.File, currResult.Type, currResult.Error))
+		}
+	}
+
+	serviceMonitorBytes, err := c.mustTemplateAsset("manifests/service-monitor.yaml")
+	if err != nil {
+		errs = append(errs, fmt.Errorf("manifests/service-monitor.yaml: %v", err))
+	} else {
+		_, serviceMonitorErr := resourceapply.ApplyServiceMonitor(c.dynamicClient, c.eventRecorder, serviceMonitorBytes)
+		// This handles 'the server could not find the requested resource', which occurs when the CRD is not
+		// available yet (the CRD is provided by the prometheus operator). Reporting it as an error would produce
+		// noise and plenty of events, so requeue instead.
+		if errors.IsNotFound(serviceMonitorErr) {
+			klog.V(4).Infof("Unable to apply service monitor: %v", serviceMonitorErr)
+			return syntheticRequeueError
+		} else if serviceMonitorErr != nil {
+			errs = append(errs, serviceMonitorErr)
+		}
+	}
+
+	err = v1helpers.NewMultiLineAggregate(errs)
+
+	// NOTE: Failing to create the monitoring resources should not lead to an operator failed state.
+	cond := operatorv1.OperatorCondition{
+		Type:   condition.MonitoringResourceControllerDegradedConditionType,
+		Status: operatorv1.ConditionFalse,
+	}
+	if err != nil {
+		// this is not a typo: the condition intentionally stays False even on error, because the servicemonitor
+		// is not a prerequisite and a missing one must not degrade the operator.
+		cond.Status = operatorv1.ConditionFalse
+		cond.Reason = "Error"
+		cond.Message = err.Error()
+	}
+	if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil {
+		if err == nil {
+			return updateError
+		}
+	}
+
+	return err
+}
+
+func (c *MonitoringResourceController) Run(ctx context.Context, workers int) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting MonitoringResourceController")
+	defer klog.Infof("Shutting down MonitoringResourceController")
+	if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *MonitoringResourceController) runWorker(ctx context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *MonitoringResourceController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + if err != syntheticRequeueError { + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + } + + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *MonitoringResourceController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go new file mode 100644 index 00000000000..7b62524ef13 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go @@ -0,0 +1,151 @@ +package monitoring + +import ( + "path/filepath" + "testing" + "time" + + "github.com/openshift/library-go/pkg/operator/v1helpers" + + "github.com/ghodss/yaml" + "github.com/openshift/library-go/pkg/operator/events" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + clienttesting "k8s.io/client-go/testing" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata" +) + +func mustAssetServiceMonitor(namespace string) runtime.Object { + config := struct { + TargetNamespace string + }{ + TargetNamespace: namespace, + } + monitorBytes := assets.MustCreateAssetFromTemplate("manifests/service-monitor.yaml", bindata.MustAsset(filepath.Join(manifestDir, "manifests/service-monitor.yaml")), config).Data + monitorJSON, err := yaml.YAMLToJSON(monitorBytes) + if err != nil { + panic(err) + } + monitorObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, monitorJSON) + if err != nil { + panic(err) + } + required, ok := monitorObj.(*unstructured.Unstructured) + if !ok { + panic("unexpected object") + } + return required +} + +func TestNewMonitoringResourcesController(t *testing.T) { + tests := []struct { + name string + startingObjects []runtime.Object + startingDynamicObjects []runtime.Object + staticPodOperatorClient v1helpers.StaticPodOperatorClient + validateActions func(t *testing.T, actions []clienttesting.Action) + validateDynamicActions func(t *testing.T, actions []clienttesting.Action) + validateStatus func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) + expectSyncError string + }{ + { + name: "create when not exists", + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + 
ManagementState: operatorv1.Managed,
+					},
+				},
+				&operatorv1.StaticPodOperatorStatus{},
+				nil,
+				nil,
+			),
+			validateActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 4 {
+					t.Errorf("expected 4 actions, got %d", len(actions))
+				}
+				if actions[1].GetVerb() != "create" || actions[1].GetResource().Resource != "roles" {
+					t.Errorf("expected to create prometheus role (%+v)", actions[1])
+				}
+			},
+			validateDynamicActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 2 {
+					t.Errorf("expected 2 actions, got %d", len(actions))
+				}
+				if actions[1].GetVerb() != "create" || actions[1].GetResource().Resource != "servicemonitors" {
+					t.Errorf("expected to create service monitor (%+v)", actions[1])
+				}
+				serviceMonitor := actions[1].(clienttesting.CreateAction).GetObject().(*unstructured.Unstructured)
+				if serviceMonitor.GetNamespace() != "target-namespace" {
+					t.Errorf("expected 'target-namespace', got %s", serviceMonitor.GetNamespace())
+				}
+			},
+		},
+		{
+			name: "skip when exists",
+			staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient(
+				&operatorv1.StaticPodOperatorSpec{
+					OperatorSpec: operatorv1.OperatorSpec{
+						ManagementState: operatorv1.Managed,
+					},
+				},
+				&operatorv1.StaticPodOperatorStatus{},
+				nil,
+				nil,
+			),
+			startingDynamicObjects: []runtime.Object{mustAssetServiceMonitor("target-namespace")},
+			validateActions:        func(t *testing.T, actions []clienttesting.Action) {},
+			validateDynamicActions: func(t *testing.T, actions []clienttesting.Action) {
+				if len(actions) != 1 {
+					t.Errorf("expected 1 action, got %d (%#v)", len(actions), actions)
+				}
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			kubeClient := fake.NewSimpleClientset(tc.startingObjects...)
+			eventRecorder := events.NewInMemoryRecorder("")
+
+			dynamicScheme := runtime.NewScheme()
+			dynamicScheme.AddKnownTypeWithName(schema.GroupVersionKind{Group: "monitoring.coreos.com", Version: "v1", Kind: "ServiceMonitor"}, &unstructured.Unstructured{})
+
+			dynamicClient := dynamicfake.NewSimpleDynamicClient(dynamicScheme, tc.startingDynamicObjects...)
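+			// ServiceMonitor is served by a CRD, so the fake dynamic client needs the
+			// GVK registered in the scheme above to decode the seeded unstructured objects.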
+ + c := NewMonitoringResourceController( + "target-namespace", + "openshift-monitoring", + tc.staticPodOperatorClient, + informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("target-namespace")), + kubeClient, + dynamicClient, + eventRecorder, + ) + + syncErr := c.sync() + if len(tc.expectSyncError) > 0 && syncErr == nil { + t.Errorf("expected %q error", tc.expectSyncError) + return + } + if len(tc.expectSyncError) > 0 && syncErr != nil && syncErr.Error() != tc.expectSyncError { + t.Errorf("expected %q error, got %q", tc.expectSyncError, syncErr.Error()) + return + } + if syncErr != nil { + t.Errorf("unexpected sync error: %v", syncErr) + return + } + + tc.validateActions(t, kubeClient.Actions()) + tc.validateDynamicActions(t, dynamicClient.Actions()) + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go new file mode 100644 index 00000000000..5a856cf936c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go @@ -0,0 +1,206 @@ +package node + +import ( + "context" + "fmt" + "strings" + "time" + + coreapiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corelisterv1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const nodeControllerWorkQueueKey = "key" + +// NodeController watches for new master nodes and adds them to the node status list in the operator config status. +type NodeController struct { + operatorClient v1helpers.StaticPodOperatorClient + + nodeLister corelisterv1.NodeLister + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +// NewNodeController creates a new node controller. 
+func NewNodeController( + operatorClient v1helpers.StaticPodOperatorClient, + kubeInformersClusterScoped informers.SharedInformerFactory, + eventRecorder events.Recorder, +) *NodeController { + c := &NodeController{ + operatorClient: operatorClient, + eventRecorder: eventRecorder.WithComponentSuffix("node-controller"), + nodeLister: kubeInformersClusterScoped.Core().V1().Nodes().Lister(), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "NodeController"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + kubeInformersClusterScoped.Core().V1().Nodes().Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, kubeInformersClusterScoped.Core().V1().Nodes().Informer().HasSynced) + + return c +} + +func (c NodeController) sync() error { + _, originalOperatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + + selector, err := labels.NewRequirement("node-role.kubernetes.io/master", selection.Equals, []string{""}) + if err != nil { + panic(err) + } + nodes, err := c.nodeLister.List(labels.NewSelector().Add(*selector)) + if err != nil { + return err + } + + newTargetNodeStates := []operatorv1.NodeStatus{} + // remove entries for missing nodes + for i, nodeState := range originalOperatorStatus.NodeStatuses { + found := false + for _, node := range nodes { + if nodeState.NodeName == node.Name { + found = true + } + } + if found { + newTargetNodeStates = append(newTargetNodeStates, originalOperatorStatus.NodeStatuses[i]) + } else { + c.eventRecorder.Warningf("MasterNodeRemoved", "Observed removal of master node %s", nodeState.NodeName) + } + } + + // add entries for new nodes + for _, node := range nodes { + found := false + for _, nodeState := range originalOperatorStatus.NodeStatuses { + if nodeState.NodeName == node.Name { + found = true + } + } + if found { + continue + } + + c.eventRecorder.Eventf("MasterNodeObserved", "Observed new master node %s", node.Name) + newTargetNodeStates = append(newTargetNodeStates, operatorv1.NodeStatus{NodeName: node.Name}) + } + + // detect and report master nodes that are not ready + notReadyNodes := []string{} + for _, node := range nodes { + for _, con := range node.Status.Conditions { + if con.Type == coreapiv1.NodeReady && con.Status != coreapiv1.ConditionTrue { + notReadyNodes = append(notReadyNodes, node.Name) + } + } + } + newCondition := operatorv1.OperatorCondition{ + Type: condition.NodeControllerDegradedConditionType, + } + if len(notReadyNodes) > 0 { + newCondition.Status = operatorv1.ConditionTrue + newCondition.Reason = "MasterNodesReady" + newCondition.Message = fmt.Sprintf("The master node(s) %q not ready", strings.Join(notReadyNodes, ",")) + } else { + newCondition.Status = operatorv1.ConditionFalse + newCondition.Reason = "MasterNodesReady" + newCondition.Message = "All master node(s) are ready" + } + + oldStatus := &operatorv1.StaticPodOperatorStatus{} + _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(newCondition), func(status *operatorv1.StaticPodOperatorStatus) error { + status.NodeStatuses = newTargetNodeStates + return nil + }, func(status *operatorv1.StaticPodOperatorStatus) error { + //a hack for storing the old status (before the update) + oldStatus = status + return nil + }) + + if updateError != nil { + return updateError + } + + if !updated { + return 
nil
+	}
+
+	for _, oldCondition := range oldStatus.Conditions {
+		if oldCondition.Type == condition.NodeControllerDegradedConditionType && oldCondition.Message != newCondition.Message {
+			c.eventRecorder.Eventf("MasterNodesReadyChanged", newCondition.Message)
+			break
+		}
+	}
+	return nil
+}
+
+// Run starts the controller and blocks until the context is cancelled.
+func (c *NodeController) Run(ctx context.Context, workers int) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting NodeController")
+	defer klog.Infof("Shutting down NodeController")
+	if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
+
+	<-ctx.Done()
+}
+
+func (c *NodeController) runWorker(ctx context.Context) {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *NodeController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// eventHandler queues the operator to check spec and status
+func (c *NodeController) eventHandler() cache.ResourceEventHandler {
+	return cache.ResourceEventHandlerFuncs{
+		AddFunc:    func(obj interface{}) { c.queue.Add(nodeControllerWorkQueueKey) },
+		UpdateFunc: func(old, new interface{}) { c.queue.Add(nodeControllerWorkQueueKey) },
+		DeleteFunc: func(obj interface{}) { c.queue.Add(nodeControllerWorkQueueKey) },
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go
new file mode 100644
index 00000000000..153cf0b4869
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go
@@ -0,0 +1,271 @@
+package node
+
+import (
+	"fmt"
+	"testing"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+func fakeMasterNode(name string) *corev1.Node {
+	n := &corev1.Node{}
+	n.Name = name
+	n.Labels = map[string]string{
+		"node-role.kubernetes.io/master": "",
+	}
+
+	return n
+}
+
+func makeNodeNotReady(node *corev1.Node) *corev1.Node {
+	con := corev1.NodeCondition{}
+	con.Type = corev1.NodeReady
+	con.Status = corev1.ConditionFalse
+	node.Status.Conditions = append(node.Status.Conditions, con)
+	return node
+}
+
+func validateCommonNodeControllerDegradedCondition(con operatorv1.OperatorCondition) error {
+	if con.Type != condition.NodeControllerDegradedConditionType {
+		return fmt.Errorf("incorrect condition.type, expected NodeControllerDegraded, got %s", con.Type)
+	}
+	if con.Reason != "MasterNodesReady" {
+		return fmt.Errorf("incorrect condition.reason, expected MasterNodesReady, got %s", con.Reason)
+	}
+	return nil
+}
+
+func TestNodeControllerDegradedConditionType(t *testing.T) {
+	scenarios := []struct {
+		name               string
+		masterNodes        []runtime.Object
+		evaluateNodeStatus func([]operatorv1.OperatorCondition) error
+	}{
+		// scenario 1
+		{
+			name:        "scenario 1: one unhealthy master node is reported",
+			masterNodes: []runtime.Object{makeNodeNotReady(fakeMasterNode("test-node-1")), fakeMasterNode("test-node-2")},
+			evaluateNodeStatus: func(conditions []operatorv1.OperatorCondition) error {
+				if len(conditions) != 1 {
+					return fmt.Errorf("expected exactly 1 condition, got %d", len(conditions))
+				}
+
+				con := conditions[0]
+				if err := validateCommonNodeControllerDegradedCondition(con); err != nil {
+					return err
+				}
+				if con.Status != operatorv1.ConditionTrue {
+					return fmt.Errorf("incorrect condition.status, expected %v, got %v", operatorv1.ConditionTrue, con.Status)
+				}
+				expectedMsg := "The master node(s) \"test-node-1\" not ready"
+				if con.Message != expectedMsg {
+					return fmt.Errorf("incorrect condition.message, expected %s, got %s", expectedMsg, con.Message)
+				}
+				return nil
+			},
+		},
+
+		// scenario 2
+		{
+			name:        "scenario 2: all master nodes are healthy",
+			masterNodes: []runtime.Object{fakeMasterNode("test-node-1"), fakeMasterNode("test-node-2")},
+			evaluateNodeStatus: func(conditions []operatorv1.OperatorCondition) error {
+				if len(conditions) != 1 {
+					return fmt.Errorf("expected exactly 1 condition, got %d", len(conditions))
+				}
+
+				con := conditions[0]
+				if err := validateCommonNodeControllerDegradedCondition(con); err != nil {
+					return err
+				}
+				if con.Status != operatorv1.ConditionFalse {
+					return fmt.Errorf("incorrect condition.status, expected %v, got %v", operatorv1.ConditionFalse, con.Status)
+				}
+				expectedMsg := "All master node(s) are ready"
+				if con.Message != expectedMsg {
+					return fmt.Errorf("incorrect condition.message, expected %s, got %s", expectedMsg, con.Message)
+				}
+				return nil
+			},
+		},
+
+		// scenario 3
+		{
+			name:        "scenario 3: multiple master nodes are unhealthy",
+			masterNodes: []runtime.Object{makeNodeNotReady(fakeMasterNode("test-node-1")), fakeMasterNode("test-node-2"), makeNodeNotReady(fakeMasterNode("test-node-3"))},
+			evaluateNodeStatus: func(conditions []operatorv1.OperatorCondition) error {
+				if len(conditions) != 1 {
+					return fmt.Errorf("expected exactly 1 condition, got %d", len(conditions))
+				}
+
+				con := conditions[0]
+				if err := validateCommonNodeControllerDegradedCondition(con); err != nil {
+					return err
+				}
+				if con.Status != operatorv1.ConditionTrue {
+					return fmt.Errorf("incorrect condition.status, expected %v, got %v", operatorv1.ConditionTrue, con.Status)
+				}
+				expectedMsg := "The master node(s) \"test-node-1,test-node-3\" not ready"
+				if con.Message != expectedMsg {
+					return fmt.Errorf("incorrect condition.message, expected %s, got %s", expectedMsg, con.Message)
+				}
+				return nil
+			},
+		},
+	}
+	for _, scenario := range scenarios {
+		t.Run(scenario.name, func(t *testing.T) {
+			kubeClient := fake.NewSimpleClientset(scenario.masterNodes...)
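+			// The fakes below let sync() run without informers: the node lister reads
+			// straight from the seeded clientset, and the static pod operator client
+			// records in memory the conditions that sync() writes, so the test can
+			// read them back afterwards.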
+ fakeLister := v1helpers.NewFakeNodeLister(kubeClient) + kubeInformers := informers.NewSharedInformerFactory(kubeClient, 1*time.Minute) + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + }, + nil, + nil, + ) + + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + c := NewNodeController(fakeStaticPodOperatorClient, kubeInformers, eventRecorder) + // override the lister so we don't have to run the informer to list nodes + c.nodeLister = fakeLister + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, status, _, _ := fakeStaticPodOperatorClient.GetStaticPodOperatorState() + + if err := scenario.evaluateNodeStatus(status.OperatorStatus.Conditions); err != nil { + t.Errorf("%s: failed to evaluate operator conditions: %v", scenario.name, err) + } + }) + + } +} + +func TestNewNodeController(t *testing.T) { + tests := []struct { + name string + startNodes []runtime.Object + startNodeStatus []operatorv1.NodeStatus + evaluateNodeStatus func([]operatorv1.NodeStatus) error + }{ + { + name: "single-node", + startNodes: []runtime.Object{fakeMasterNode("test-node-1")}, + evaluateNodeStatus: func(s []operatorv1.NodeStatus) error { + if len(s) != 1 { + return fmt.Errorf("expected 1 node status, got %d", len(s)) + } + if s[0].NodeName != "test-node-1" { + return fmt.Errorf("expected 'test-node-1' as node name, got %q", s[0].NodeName) + } + return nil + }, + }, + { + name: "multi-node", + startNodes: []runtime.Object{fakeMasterNode("test-node-1"), fakeMasterNode("test-node-2"), fakeMasterNode("test-node-3")}, + startNodeStatus: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + }, + }, + evaluateNodeStatus: func(s []operatorv1.NodeStatus) error { + if len(s) != 3 { + return fmt.Errorf("expected 3 node status, got %d", len(s)) + } + if s[0].NodeName != "test-node-1" { + return fmt.Errorf("expected first node to be test-node-1, got %q", s[0].NodeName) + } + if s[1].NodeName != "test-node-2" { + return fmt.Errorf("expected second node to be test-node-2, got %q", s[1].NodeName) + } + return nil + }, + }, + { + name: "single-node-removed", + startNodes: []runtime.Object{}, + startNodeStatus: []operatorv1.NodeStatus{ + { + NodeName: "lost-node", + }, + }, + evaluateNodeStatus: func(s []operatorv1.NodeStatus) error { + if len(s) != 0 { + return fmt.Errorf("expected no node status, got %d", len(s)) + } + return nil + }, + }, + { + name: "no-op", + startNodes: []runtime.Object{fakeMasterNode("test-node-1")}, + startNodeStatus: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + }, + }, + evaluateNodeStatus: func(s []operatorv1.NodeStatus) error { + if len(s) != 1 { + return fmt.Errorf("expected one node status, got %d", len(s)) + } + return nil + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(test.startNodes...) 
+ fakeLister := v1helpers.NewFakeNodeLister(kubeClient) + kubeInformers := informers.NewSharedInformerFactory(kubeClient, 1*time.Minute) + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: test.startNodeStatus, + }, + nil, + nil, + ) + + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + c := NewNodeController(fakeStaticPodOperatorClient, kubeInformers, eventRecorder) + // override the lister so we don't have to run the informer to list nodes + c.nodeLister = fakeLister + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, status, _, _ := fakeStaticPodOperatorClient.GetStaticPodOperatorState() + + if err := test.evaluateNodeStatus(status.NodeStatuses); err != nil { + t.Errorf("%s: failed to evaluate node status: %v", test.name, err) + } + }) + + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go new file mode 100644 index 00000000000..dce0cd0bd86 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata/bindata.go @@ -0,0 +1,254 @@ +// Code generated by go-bindata. +// sources: +// pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml +// DO NOT EDIT! + +package bindata + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (fi bindataFileInfo) Name() string { + return fi.name +} +func (fi bindataFileInfo) Size() int64 { + return fi.size +} +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} +func (fi bindataFileInfo) IsDir() bool { + return false +} +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml = []byte(`apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: pruner +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: pruner + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir +`) + +func pkgOperatorStaticpodControllerPruneManifestsPrunerPodYamlBytes() ([]byte, error) { + return _pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml, nil +} + +func pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml() (*asset, error) { + bytes, err := pkgOperatorStaticpodControllerPruneManifestsPrunerPodYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: 
"pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml": pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "pkg": {nil, map[string]*bintree{ + "operator": {nil, map[string]*bintree{ + "staticpod": {nil, map[string]*bintree{ + "controller": {nil, map[string]*bintree{ + "prune": {nil, map[string]*bintree{ + "manifests": {nil, map[string]*bintree{ + "pruner-pod.yaml": {pkgOperatorStaticpodControllerPruneManifestsPrunerPodYaml, map[string]*bintree{}}, + }}, + }}, + }}, + }}, + }}, + }}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) 
+} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml new file mode 100644 index 00000000000..bae7d9c05b7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/manifests/pruner-pod.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +kind: Pod +metadata: + namespace: # Value set by operator + name: # Value set by operator + labels: + app: pruner +spec: + serviceAccountName: installer-sa + nodeName: # Value set by operator + containers: + - name: pruner + command: # Value set by operator + args: # Value set by operator + image: # Value set by operator + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + runAsUser: 0 + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes/ + name: kubelet-dir + restartPolicy: Never + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + securityContext: + runAsUser: 0 + volumes: + - hostPath: + path: /etc/kubernetes/ + name: kubelet-dir diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go new file mode 100644 index 00000000000..830851ebe6c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go @@ -0,0 +1,364 @@ +package prune + +import ( + "context" + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/resource/resourceread" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/bindata" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// PruneController is a controller that watches static installer pod revision statuses and spawns +// a pruner pod to delete old revision resources from disk +type PruneController struct { + targetNamespace, podResourcePrefix string + // command is the string to use for the pruning pod command + command []string + + // prunerPodImageFn returns the image name for the pruning pod + prunerPodImageFn func() string + // ownerRefsFn sets the ownerrefs on the pruner pod + ownerRefsFn func(revision int32) ([]metav1.OwnerReference, error) + + operatorClient v1helpers.StaticPodOperatorClient + + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + podGetter corev1client.PodsGetter + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +const ( + pruneControllerWorkQueueKey = "key" + statusConfigMapName = "revision-status-" + defaultRevisionLimit = int32(5) +) + +// NewPruneController creates a new pruning controller +func NewPruneController( + targetNamespace string, + podResourcePrefix string, + command 
[]string, + configMapGetter corev1client.ConfigMapsGetter, + secretGetter corev1client.SecretsGetter, + podGetter corev1client.PodsGetter, + operatorClient v1helpers.StaticPodOperatorClient, + eventRecorder events.Recorder, +) *PruneController { + c := &PruneController{ + targetNamespace: targetNamespace, + podResourcePrefix: podResourcePrefix, + command: command, + + operatorClient: operatorClient, + + configMapGetter: configMapGetter, + secretGetter: secretGetter, + podGetter: podGetter, + eventRecorder: eventRecorder.WithComponentSuffix("prune-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "PruneController"), + prunerPodImageFn: getPrunerPodImageFromEnv, + } + + c.ownerRefsFn = c.setOwnerRefs + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + + return c +} + +func getRevisionLimits(operatorSpec *operatorv1.StaticPodOperatorSpec) (int32, int32) { + failedRevisionLimit := defaultRevisionLimit + succeededRevisionLimit := defaultRevisionLimit + if operatorSpec.FailedRevisionLimit != 0 { + failedRevisionLimit = operatorSpec.FailedRevisionLimit + } + if operatorSpec.SucceededRevisionLimit != 0 { + succeededRevisionLimit = operatorSpec.SucceededRevisionLimit + } + return failedRevisionLimit, succeededRevisionLimit +} + +func (c *PruneController) excludedRevisionHistory(operatorStatus *operatorv1.StaticPodOperatorStatus, failedRevisionLimit, succeededRevisionLimit int32) ([]int, error) { + var succeededRevisions, failedRevisions, inProgressRevisions, unknownStatusRevisions []int + + configMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return []int{}, err + } + for _, configMap := range configMaps.Items { + if !strings.HasPrefix(configMap.Name, statusConfigMapName) { + continue + } + + if revision, ok := configMap.Data["revision"]; ok { + revisionNumber, err := strconv.Atoi(revision) + if err != nil { + return []int{}, err + } + switch configMap.Data["status"] { + case string(corev1.PodSucceeded): + succeededRevisions = append(succeededRevisions, revisionNumber) + case string(corev1.PodFailed): + failedRevisions = append(failedRevisions, revisionNumber) + + case "InProgress": + // we always protect inprogress + inProgressRevisions = append(inProgressRevisions, revisionNumber) + + default: + // protect things you don't understand + unknownStatusRevisions = append(unknownStatusRevisions, revisionNumber) + c.eventRecorder.Event("UnknownRevisionStatus", fmt.Sprintf("unknown status for revision %d: %v", revisionNumber, configMap.Data["status"])) + } + } + } + + // Return early if nothing to prune + if len(succeededRevisions)+len(failedRevisions) == 0 { + klog.V(2).Info("no revision IDs currently eligible to prune") + return []int{}, nil + } + + // Get list of protected IDs + protectedSucceededRevisions := protectedRevisions(succeededRevisions, int(succeededRevisionLimit)) + protectedFailedRevisions := protectedRevisions(failedRevisions, int(failedRevisionLimit)) + + excludedRevisions := make([]int, 0, len(protectedSucceededRevisions)+len(protectedFailedRevisions)+len(inProgressRevisions)+len(unknownStatusRevisions)) + excludedRevisions = append(excludedRevisions, protectedSucceededRevisions...) + excludedRevisions = append(excludedRevisions, protectedFailedRevisions...) + excludedRevisions = append(excludedRevisions, inProgressRevisions...) 
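+ // Revisions whose status we do not recognize are protected as well (the default case above).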
+ excludedRevisions = append(excludedRevisions, unknownStatusRevisions...) + sort.Ints(excludedRevisions) + + // There should always be at least 1 excluded ID, otherwise we'll delete the current revision + if len(excludedRevisions) == 0 { + return []int{}, fmt.Errorf("need at least 1 excluded ID for revision pruning") + } + return excludedRevisions, nil +} + +func (c *PruneController) pruneDiskResources(operatorStatus *operatorv1.StaticPodOperatorStatus, excludedRevisions []int, maxEligibleRevision int) error { + // Run pruning pod on each node and pin it to that node + for _, nodeStatus := range operatorStatus.NodeStatuses { + // Use the highest value between CurrentRevision and LastFailedRevision + // Because CurrentRevision only updates on successful installs and we still prune on an unsuccessful install + if err := c.ensurePrunePod(nodeStatus.NodeName, maxEligibleRevision, excludedRevisions, max(nodeStatus.LastFailedRevision, nodeStatus.CurrentRevision)); err != nil { + return err + } + } + return nil +} + +func (c *PruneController) pruneAPIResources(excludedRevisions []int, maxEligibleRevision int) error { + protectedRevisions := sets.NewInt(excludedRevisions...) + statusConfigMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return err + } + for _, cm := range statusConfigMaps.Items { + if !strings.HasPrefix(cm.Name, statusConfigMapName) { + continue + } + + revision, err := strconv.Atoi(cm.Data["revision"]) + if err != nil { + return fmt.Errorf("unexpected error converting revision to int: %+v", err) + } + + if protectedRevisions.Has(revision) { + continue + } + if revision > maxEligibleRevision { + continue + } + if err := c.configMapGetter.ConfigMaps(c.targetNamespace).Delete(cm.Name, &metav1.DeleteOptions{}); err != nil { + return err + } + } + return nil +} + +func protectedRevisions(revisions []int, revisionLimit int) []int { + sort.Ints(revisions) + if len(revisions) == 0 { + return revisions + } + startKey := 0 + // We use -1 = unlimited revisions, so protect all. 
The limit should never be literally 0 either.
+ if revisionLimit > 0 && len(revisions) > revisionLimit {
+ startKey = len(revisions) - revisionLimit
+ }
+ return revisions[startKey:]
+}
+
+func (c *PruneController) ensurePrunePod(nodeName string, maxEligibleRevision int, protectedRevisions []int, revision int32) error {
+ if revision == 0 {
+ return nil
+ }
+ pod := resourceread.ReadPodV1OrDie(bindata.MustAsset(filepath.Join("pkg/operator/staticpod/controller/prune", "manifests/pruner-pod.yaml")))
+
+ pod.Name = getPrunerPodName(nodeName, revision)
+ pod.Namespace = c.targetNamespace
+ pod.Spec.NodeName = nodeName
+ pod.Spec.Containers[0].Image = c.prunerPodImageFn()
+ pod.Spec.Containers[0].Command = c.command
+ pod.Spec.Containers[0].Args = append(pod.Spec.Containers[0].Args,
+ fmt.Sprintf("-v=%d", 4),
+ fmt.Sprintf("--max-eligible-revision=%d", maxEligibleRevision),
+ fmt.Sprintf("--protected-revisions=%s", revisionsToString(protectedRevisions)),
+ fmt.Sprintf("--resource-dir=%s", "/etc/kubernetes/static-pod-resources"),
+ fmt.Sprintf("--static-pod-name=%s", c.podResourcePrefix),
+ )
+
+ ownerRefs, err := c.ownerRefsFn(revision)
+ if err != nil {
+ return fmt.Errorf("unable to set pruner pod ownerrefs: %+v", err)
+ }
+ pod.OwnerReferences = ownerRefs
+
+ _, _, err = resourceapply.ApplyPod(c.podGetter, c.eventRecorder, pod)
+ return err
+}
+
+func (c *PruneController) setOwnerRefs(revision int32) ([]metav1.OwnerReference, error) {
+ ownerReferences := []metav1.OwnerReference{}
+ statusConfigMap, err := c.configMapGetter.ConfigMaps(c.targetNamespace).Get(fmt.Sprintf("revision-status-%d", revision), metav1.GetOptions{})
+ if err == nil {
+ ownerReferences = append(ownerReferences, metav1.OwnerReference{
+ APIVersion: "v1",
+ Kind: "ConfigMap",
+ Name: statusConfigMap.Name,
+ UID: statusConfigMap.UID,
+ })
+ }
+ return ownerReferences, err
+}
+
+func getPrunerPodName(nodeName string, revision int32) string {
+ return fmt.Sprintf("revision-pruner-%d-%s", revision, nodeName)
+}
+
+func revisionsToString(revisions []int) string {
+ values := []string{}
+ for _, id := range revisions {
+ value := strconv.Itoa(id)
+ values = append(values, value)
+ }
+ return strings.Join(values, ",")
+}
+
+func getPrunerPodImageFromEnv() string {
+ return os.Getenv("OPERATOR_IMAGE")
+}
+
+func (c *PruneController) Run(ctx context.Context, workers int) {
+ defer utilruntime.HandleCrash()
+ defer c.queue.ShutDown()
+
+ klog.Infof("Starting PruneController")
+ defer klog.Infof("Shutting down PruneController")
+ if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) {
+ return
+ }
+
+ // Regardless of the requested worker count, only start a single worker.
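+ // wait.UntilWithContext re-invokes runWorker every second after it returns, so the
+ // worker keeps draining the queue until the context is cancelled.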
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *PruneController) runWorker(ctx context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *PruneController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func (c *PruneController) sync() error { + klog.V(5).Info("Syncing revision pruner") + operatorSpec, operatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + return err + } + failedLimit, succeededLimit := getRevisionLimits(operatorSpec) + + excludedRevisions, err := c.excludedRevisionHistory(operatorStatus, failedLimit, succeededLimit) + if err != nil { + return err + } + // if no IDs are excluded, then there is nothing to prune + if len(excludedRevisions) == 0 { + klog.Info("No excluded revisions to prune, skipping") + return nil + } + + errs := []error{} + if diskErr := c.pruneDiskResources(operatorStatus, excludedRevisions, excludedRevisions[len(excludedRevisions)-1]); diskErr != nil { + errs = append(errs, diskErr) + } + if apiErr := c.pruneAPIResources(excludedRevisions, excludedRevisions[len(excludedRevisions)-1]); apiErr != nil { + errs = append(errs, apiErr) + } + return v1helpers.NewMultiLineAggregate(errs) +} + +// eventHandler queues the operator to check spec and status +func (c *PruneController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(pruneControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(pruneControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(pruneControllerWorkQueueKey) }, + } +} + +func max(a, b int32) int32 { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go new file mode 100644 index 00000000000..58fec73ba54 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go @@ -0,0 +1,506 @@ +package prune + +import ( + "fmt" + "testing" + + "github.com/openshift/library-go/pkg/operator/v1helpers" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" +) + +type configMapInfo struct { + name string + namespace string + revision string + phase string +} + +func TestPruneAPIResources(t *testing.T) { + tests := []struct { + name string + targetNamespace string + failedLimit int32 + succeededLimit int32 + currentRevision int + configMaps []configMapInfo + testSecrets []string + testConfigs []string + startingObjects []runtime.Object + expectedObjects []runtime.Object + }{ + { + name: "prunes api resources based on limits set and status stored in configmap", + targetNamespace: "prune-api", + startingObjects: []runtime.Object{ + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: "prune-api"}, + Data: map[string]string{ + "status": 
string(v1.PodSucceeded), + "revision": "1", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "2", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-3", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodFailed), + "revision": "3", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-4", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodFailed), + "revision": "4", + }, + }, + }, + failedLimit: 1, + succeededLimit: 1, + expectedObjects: []runtime.Object{ + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "2", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-4", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodFailed), + "revision": "4", + }, + }, + }, + }, + { + name: "protects InProgress and unknown revision statuses", + targetNamespace: "prune-api", + startingObjects: []runtime.Object{ + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "1", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: "prune-api"}, + Data: map[string]string{ + "status": "foo", + "revision": "2", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-3", Namespace: "prune-api"}, + Data: map[string]string{ + "status": "InProgress", + "revision": "3", + }, + }, + }, + failedLimit: 1, + succeededLimit: 1, + expectedObjects: []runtime.Object{ + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "1", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: "prune-api"}, + Data: map[string]string{ + "status": "foo", + "revision": "2", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-3", Namespace: "prune-api"}, + Data: map[string]string{ + "status": "InProgress", + "revision": "3", + }, + }, + }, + }, + { + name: "protects all with unlimited revisions", + targetNamespace: "prune-api", + startingObjects: []runtime.Object{ + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "1", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "2", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-3", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "3", + }, + }, + }, + failedLimit: -1, + succeededLimit: -1, + expectedObjects: []runtime.Object{ + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "1", + }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "2", 
+ }, + }, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status-3", Namespace: "prune-api"}, + Data: map[string]string{ + "status": string(v1.PodSucceeded), + "revision": "3", + }, + }, + }, + }, + } + for _, tc := range tests { + kubeClient := fake.NewSimpleClientset(tc.startingObjects...) + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + FailedRevisionLimit: tc.failedLimit, + SucceededRevisionLimit: tc.succeededLimit, + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + + operatorStatus := &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + TargetRevision: 0, + }, + }, + } + + c := &PruneController{ + targetNamespace: tc.targetNamespace, + podResourcePrefix: "test-pod", + command: []string{"/bin/true"}, + configMapGetter: kubeClient.CoreV1(), + secretGetter: kubeClient.CoreV1(), + podGetter: kubeClient.CoreV1(), + eventRecorder: eventRecorder, + operatorClient: fakeStaticPodOperatorClient, + } + c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { + return []metav1.OwnerReference{}, nil + } + c.prunerPodImageFn = func() string { return "docker.io/foo/bar" } + + operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + t.Fatalf("unexpected error %q", err) + } + failedLimit, succeededLimit := getRevisionLimits(operatorSpec) + + excludedRevisions, err := c.excludedRevisionHistory(operatorStatus, failedLimit, succeededLimit) + if err != nil { + t.Fatalf("unexpected error %q", err) + } + if apiErr := c.pruneAPIResources(excludedRevisions, excludedRevisions[len(excludedRevisions)-1]); apiErr != nil { + t.Fatalf("unexpected error %q", apiErr) + } + + statusConfigMaps, err := c.configMapGetter.ConfigMaps(tc.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("unexpected error %q", err) + } + if len(statusConfigMaps.Items) != len(tc.expectedObjects) { + t.Errorf("expected objects %+v but got %+v", tc.expectedObjects, statusConfigMaps.Items) + } + } +} + +func TestPruneDiskResources(t *testing.T) { + tests := []struct { + name string + failedLimit int32 + succeededLimit int32 + maxEligibleRevision int + protectedRevisions string + configMaps []configMapInfo + expectedErr string + }{ + { + name: "creates prune pod appropriately", + configMaps: []configMapInfo{ + { + name: "revision-status-1", + namespace: "test", + revision: "1", + phase: string(v1.PodSucceeded), + }, + { + name: "revision-status-2", + namespace: "test", + revision: "2", + phase: string(v1.PodFailed), + }, + { + name: "revision-status-3", + namespace: "test", + revision: "3", + phase: string(v1.PodSucceeded), + }, + }, + maxEligibleRevision: 3, + protectedRevisions: "2,3", + failedLimit: 1, + succeededLimit: 1, + }, + + { + name: "defaults to unlimited revision history", + configMaps: []configMapInfo{ + { + name: "revision-status-1", + namespace: "test", + revision: "1", + phase: string(v1.PodSucceeded), + }, + { + name: "revision-status-2", + namespace: "test", + revision: "2", + phase: string(v1.PodFailed), 
+ }, + { + name: "revision-status-3", + namespace: "test", + revision: "3", + phase: string(v1.PodSucceeded), + }, + }, + maxEligibleRevision: 3, + protectedRevisions: "1,2,3", + }, + + { + name: "protects unknown revision status", + configMaps: []configMapInfo{ + { + name: "revision-status-1", + namespace: "test", + revision: "1", + phase: string(v1.PodSucceeded), + }, + { + name: "revision-status-2", + namespace: "test", + revision: "2", + phase: "garbage", + }, + }, + maxEligibleRevision: 2, + protectedRevisions: "1,2", + }, + { + name: "handles revisions of only one type of phase", + configMaps: []configMapInfo{ + { + name: "revision-status-1", + namespace: "test", + revision: "1", + phase: string(v1.PodSucceeded), + }, + { + name: "revision-status-2", + namespace: "test", + revision: "2", + phase: string(v1.PodSucceeded), + }, + }, + maxEligibleRevision: 2, + protectedRevisions: "2", + failedLimit: 1, + succeededLimit: 1, + }, + { + name: "protects all with unlimited revisions", + configMaps: []configMapInfo{ + { + name: "revision-status-1", + namespace: "test", + revision: "1", + phase: string(v1.PodSucceeded), + }, + { + name: "revision-status-2", + namespace: "test", + revision: "2", + phase: string(v1.PodSucceeded), + }, + }, + maxEligibleRevision: 2, + protectedRevisions: "2", + failedLimit: 1, + succeededLimit: 1, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset() + + var prunerPod *v1.Pod + kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + prunerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + return false, nil, nil + }) + kubeClient.PrependReactor("list", "configmaps", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + return true, configMapList(test.configMaps), nil + }) + + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + FailedRevisionLimit: test.failedLimit, + SucceededRevisionLimit: test.succeededLimit, + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + + operatorStatus := &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 1, + TargetRevision: 0, + }, + }, + } + + c := &PruneController{ + targetNamespace: "test", + podResourcePrefix: "test-pod", + command: []string{"/bin/true"}, + configMapGetter: kubeClient.CoreV1(), + secretGetter: kubeClient.CoreV1(), + podGetter: kubeClient.CoreV1(), + eventRecorder: eventRecorder, + operatorClient: fakeStaticPodOperatorClient, + } + c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { + return []metav1.OwnerReference{}, nil + } + c.prunerPodImageFn = func() string { return "docker.io/foo/bar" } + + operatorSpec, _, _, err := c.operatorClient.GetStaticPodOperatorState() + if err != nil { + t.Fatalf("unexpected error %q", err) + } + failedLimit, succeededLimit := getRevisionLimits(operatorSpec) + + excludedRevisions, err := c.excludedRevisionHistory(operatorStatus, failedLimit, succeededLimit) + if err 
!= nil {
+ t.Fatalf("unexpected error %q", err)
+ }
+ if diskErr := c.pruneDiskResources(operatorStatus, excludedRevisions, excludedRevisions[len(excludedRevisions)-1]); diskErr != nil {
+ t.Fatalf("unexpected error %q", diskErr)
+ }
+
+ if prunerPod == nil {
+ t.Fatalf("expected a pruner pod to be created")
+ }
+
+ if prunerPod.Spec.Containers[0].Image != "docker.io/foo/bar" {
+ t.Fatalf("expected docker.io/foo/bar image, got %q", prunerPod.Spec.Containers[0].Image)
+ }
+
+ if prunerPod.Spec.Containers[0].Command[0] != "/bin/true" {
+ t.Fatalf("expected /bin/true as a command, got %q", prunerPod.Spec.Containers[0].Command[0])
+ }
+
+ expectedArgs := []string{
+ "-v=4",
+ fmt.Sprintf("--max-eligible-revision=%d", test.maxEligibleRevision),
+ fmt.Sprintf("--protected-revisions=%s", test.protectedRevisions),
+ fmt.Sprintf("--resource-dir=%s", "/etc/kubernetes/static-pod-resources"),
+ fmt.Sprintf("--static-pod-name=%s", "test-pod"),
+ }
+
+ if len(expectedArgs) != len(prunerPod.Spec.Containers[0].Args) {
+ t.Fatalf("expected arguments do not match container arguments: %#v != %#v", expectedArgs, prunerPod.Spec.Containers[0].Args)
+ }
+
+ for i, v := range prunerPod.Spec.Containers[0].Args {
+ if expectedArgs[i] != v {
+ t.Errorf("arg[%d] expected %q, got %q", i, expectedArgs[i], v)
+ }
+ }
+ })
+ }
+}
+
+func configMapList(configMaps []configMapInfo) *v1.ConfigMapList {
+ items := make([]v1.ConfigMap, 0, len(configMaps))
+ for _, cm := range configMaps {
+ configMap := v1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: cm.name,
+ Namespace: cm.namespace,
+ },
+ Data: map[string]string{
+ "revision": cm.revision,
+ "status": cm.phase,
+ },
+ }
+ items = append(items, configMap)
+ }
+
+ return &v1.ConfigMapList{Items: items}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/compatibility.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/compatibility.go
new file mode 100644
index 00000000000..296b9e2c04b
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/compatibility.go
@@ -0,0 +1,9 @@
+package revision
+
+import (
+ "github.com/openshift/library-go/pkg/operator/revisioncontroller"
+)
+
+// RevisionResource is a type alias that keeps source compatibility for older
+// consumers of this type from when it was used only for static pods.
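+// Because this is an alias rather than a distinct named type, the two names are
+// fully interchangeable: a caller may keep writing, for example,
+//
+// resources := []revision.RevisionResource{{Name: "kube-apiserver-pod"}}
+//
+// and pass that slice anywhere a []revisioncontroller.RevisionResource is expected,
+// with no conversion.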
+type RevisionResource = revisioncontroller.RevisionResource
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go
new file mode 100644
index 00000000000..0fcfcb1cefc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go
@@ -0,0 +1,220 @@
+package staticpodstate
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/client-go/informers"
+ corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+ "k8s.io/klog"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+
+ "github.com/openshift/library-go/pkg/operator/condition"
+ "github.com/openshift/library-go/pkg/operator/events"
+ "github.com/openshift/library-go/pkg/operator/management"
+ "github.com/openshift/library-go/pkg/operator/status"
+ "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+var (
+ staticPodStateControllerWorkQueueKey = "key"
+)
+
+// StaticPodStateController is a controller that watches static pods and will produce a failing status if the
+// static pods start crashing for some reason.
+type StaticPodStateController struct {
+ targetNamespace string
+ staticPodName string
+ operandName string
+ operatorNamespace string
+
+ operatorClient v1helpers.StaticPodOperatorClient
+ configMapGetter corev1client.ConfigMapsGetter
+ podsGetter corev1client.PodsGetter
+ versionRecorder status.VersionGetter
+
+ cachesToSync []cache.InformerSynced
+ queue workqueue.RateLimitingInterface
+ eventRecorder events.Recorder
+}
+
+// NewStaticPodStateController creates a controller that watches static pods and will produce a failing status if the
+// static pods start crashing for some reason.
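+//
+// A minimal wiring sketch; the namespaces, names, and client variables below are
+// illustrative placeholders rather than values required by this API:
+//
+// c := NewStaticPodStateController(
+// "openshift-kube-apiserver", "kube-apiserver",
+// "openshift-kube-apiserver-operator", "kube-apiserver",
+// kubeInformersForTargetNamespace, operatorClient,
+// kubeClient.CoreV1(), kubeClient.CoreV1(),
+// versionRecorder, eventRecorder,
+// )
+// go c.Run(ctx, 1)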
+func NewStaticPodStateController(
+ targetNamespace, staticPodName, operatorNamespace, operandName string,
+ kubeInformersForTargetNamespace informers.SharedInformerFactory,
+ operatorClient v1helpers.StaticPodOperatorClient,
+ configMapGetter corev1client.ConfigMapsGetter,
+ podsGetter corev1client.PodsGetter,
+ versionRecorder status.VersionGetter,
+ eventRecorder events.Recorder,
+) *StaticPodStateController {
+ c := &StaticPodStateController{
+ targetNamespace: targetNamespace,
+ staticPodName: staticPodName,
+ operandName: operandName,
+ operatorNamespace: operatorNamespace,
+
+ operatorClient: operatorClient,
+ configMapGetter: configMapGetter,
+ podsGetter: podsGetter,
+ versionRecorder: versionRecorder,
+ eventRecorder: eventRecorder.WithComponentSuffix("static-pod-state-controller"),
+
+ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StaticPodStateController"),
+ }
+
+ operatorClient.Informer().AddEventHandler(c.eventHandler())
+ kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler())
+
+ c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced)
+ c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced)
+
+ return c
+}
+
+func (c *StaticPodStateController) sync() error {
+ operatorSpec, originalOperatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState()
+ if err != nil {
+ return err
+ }
+
+ if !management.IsOperatorManaged(operatorSpec.ManagementState) {
+ return nil
+ }
+
+ errs := []error{}
+ failingErrorCount := 0
+ images := sets.NewString()
+ for _, node := range originalOperatorStatus.NodeStatuses {
+ pod, err := c.podsGetter.Pods(c.targetNamespace).Get(mirrorPodNameForNode(c.staticPodName, node.NodeName), metav1.GetOptions{})
+ if err != nil {
+ errs = append(errs, err)
+ failingErrorCount++
+ continue
+ }
+ images.Insert(pod.Spec.Containers[0].Image)
+
+ for _, containerStatus := range pod.Status.ContainerStatuses {
+ if !containerStatus.Ready {
+ // A container that is not ready does not tell us whether the operator is failing:
+ // every container starts out not ready, so we do not blip the failing state for it.
+ // We still reflect the not-ready state in error conditions, but we do not mark the operator as failed.
+ errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is not ready", node.NodeName, pod.Name, containerStatus.Name))
+ }
+ if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Reason != "PodInitializing" {
+ errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is waiting: %q - %q", node.NodeName, pod.Name, containerStatus.Name, containerStatus.State.Waiting.Reason, containerStatus.State.Waiting.Message))
+ failingErrorCount++
+ }
+ if containerStatus.State.Terminated != nil {
+ // Containers can be terminated gracefully to trigger a certificate reload, so do not report these as failures.
+ errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is terminated: %q - %q", node.NodeName, pod.Name, containerStatus.Name, containerStatus.State.Terminated.Reason, containerStatus.State.Terminated.Message))
+ // Count it as failing only when the termination was caused by an error (non-zero exit code).
+ if containerStatus.State.Terminated.ExitCode != 0 {
+ failingErrorCount++
+ }
+
+ }
+ }
+ }
+
+ if len(images) == 0 {
+ c.eventRecorder.Warningf("MissingVersion", "no image found for operand pod")
+ } else if len(images) > 1 {
+ c.eventRecorder.Eventf("MultipleVersions", "multiple versions found, probably in transition: %v", strings.Join(images.List(), ","))
+ } else {
+ c.versionRecorder.SetVersion(
+ c.operandName,
+ status.VersionForOperandFromEnv(),
+ )
+ c.versionRecorder.SetVersion(
+ "operator",
+ status.VersionForOperatorFromEnv(),
+ )
+ }
+
+ // update the failing condition
+ cond := operatorv1.OperatorCondition{
+ Type: condition.StaticPodsDegradedConditionType,
+ Status: operatorv1.ConditionFalse,
+ }
+ // Failing errors
+ if failingErrorCount > 0 {
+ cond.Status = operatorv1.ConditionTrue
+ cond.Reason = "Error"
+ cond.Message = v1helpers.NewMultiLineAggregate(errs).Error()
+ }
+ // Non-failing errors
+ if failingErrorCount == 0 && len(errs) > 0 {
+ cond.Reason = "Error"
+ cond.Message = v1helpers.NewMultiLineAggregate(errs).Error()
+ }
+ if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil {
+ return updateError
+ }
+
+ return err
+}
+
+func mirrorPodNameForNode(staticPodName, nodeName string) string {
+ return staticPodName + "-" + nodeName
+}
+
+// Run starts the controller and blocks until the context is cancelled.
+func (c *StaticPodStateController) Run(ctx context.Context, workers int) {
+ defer utilruntime.HandleCrash()
+ defer c.queue.ShutDown()
+
+ klog.Infof("Starting StaticPodStateController")
+ defer klog.Infof("Shutting down StaticPodStateController")
+ if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) {
+ return
+ }
+
+ // Regardless of the requested worker count, only start a single worker.
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *StaticPodStateController) runWorker(ctx context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *StaticPodStateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *StaticPodStateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(staticPodStateControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(staticPodStateControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(staticPodStateControllerWorkQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go new file mode 100644 index 00000000000..da9d09f47a0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go @@ -0,0 +1,283 @@ +package staticpod + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/loglevel" + "github.com/openshift/library-go/pkg/operator/revisioncontroller" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/installer" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/node" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/prune" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate" + "github.com/openshift/library-go/pkg/operator/status" + "github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +type RunnableController interface { + Run(ctx context.Context, workers int) +} + +type staticPodOperatorControllerBuilder struct { + // clients and related + staticPodOperatorClient v1helpers.StaticPodOperatorClient + kubeClient kubernetes.Interface + kubeInformers v1helpers.KubeInformersForNamespaces + dynamicClient dynamic.Interface + eventRecorder events.Recorder + + // resource information + operandNamespace string + staticPodName string + revisionConfigMaps []revisioncontroller.RevisionResource + revisionSecrets []revisioncontroller.RevisionResource + + // cert information + certDir string + certConfigMaps []revisioncontroller.RevisionResource + certSecrets []revisioncontroller.RevisionResource + + // versioner information + versionRecorder status.VersionGetter + operatorNamespace string + operandName string + + // installer information + installCommand []string + + // pruning information + pruneCommand []string + // TODO de-dupe this. 
I think it's actually a directory name + staticPodPrefix string + + // TODO: remove this after all operators get rid of service monitor controller + enableServiceMonitorController bool +} + +func NewBuilder( + staticPodOperatorClient v1helpers.StaticPodOperatorClient, + kubeClient kubernetes.Interface, + kubeInformers v1helpers.KubeInformersForNamespaces, +) Builder { + return &staticPodOperatorControllerBuilder{ + staticPodOperatorClient: staticPodOperatorClient, + kubeClient: kubeClient, + kubeInformers: kubeInformers, + } +} + +// Builder allows the caller to construct a set of static pod controllers in pieces +type Builder interface { + WithEvents(eventRecorder events.Recorder) Builder + WithServiceMonitor(dynamicClient dynamic.Interface) Builder + WithVersioning(operatorNamespace, operandName string, versionRecorder status.VersionGetter) Builder + WithResources(operandNamespace, staticPodName string, revisionConfigMaps, revisionSecrets []revisioncontroller.RevisionResource) Builder + WithCerts(certDir string, certConfigMaps, certSecrets []revisioncontroller.RevisionResource) Builder + WithInstaller(command []string) Builder + WithPruning(command []string, staticPodPrefix string) Builder + ToControllers() (RunnableController, error) +} + +func (b *staticPodOperatorControllerBuilder) WithEvents(eventRecorder events.Recorder) Builder { + b.eventRecorder = eventRecorder + return b +} + +// DEPRECATED: We have moved all our operators now to have this manifest with customized content. +func (b *staticPodOperatorControllerBuilder) WithServiceMonitor(dynamicClient dynamic.Interface) Builder { + klog.Warning("DEPRECATED: MonitoringResourceController is no longer needed") + b.enableServiceMonitorController = true + b.dynamicClient = dynamicClient + return b +} + +func (b *staticPodOperatorControllerBuilder) WithVersioning(operatorNamespace, operandName string, versionRecorder status.VersionGetter) Builder { + b.operatorNamespace = operatorNamespace + b.operandName = operandName + b.versionRecorder = versionRecorder + return b +} + +func (b *staticPodOperatorControllerBuilder) WithResources(operandNamespace, staticPodName string, revisionConfigMaps, revisionSecrets []revisioncontroller.RevisionResource) Builder { + b.operandNamespace = operandNamespace + b.staticPodName = staticPodName + b.revisionConfigMaps = revisionConfigMaps + b.revisionSecrets = revisionSecrets + return b +} + +func (b *staticPodOperatorControllerBuilder) WithCerts(certDir string, certConfigMaps, certSecrets []revisioncontroller.RevisionResource) Builder { + b.certDir = certDir + b.certConfigMaps = certConfigMaps + b.certSecrets = certSecrets + return b +} + +func (b *staticPodOperatorControllerBuilder) WithInstaller(command []string) Builder { + b.installCommand = command + return b +} + +func (b *staticPodOperatorControllerBuilder) WithPruning(command []string, staticPodPrefix string) Builder { + b.pruneCommand = command + b.staticPodPrefix = staticPodPrefix + return b +} + +func (b *staticPodOperatorControllerBuilder) ToControllers() (RunnableController, error) { + controllers := &staticPodOperatorControllers{} + + eventRecorder := b.eventRecorder + if eventRecorder == nil { + eventRecorder = events.NewLoggingEventRecorder("static-pod-operator-controller") + } + versionRecorder := b.versionRecorder + if versionRecorder == nil { + versionRecorder = status.NewVersionGetter() + } + + configMapClient := v1helpers.CachedConfigMapGetter(b.kubeClient.CoreV1(), b.kubeInformers) + secretClient := 
v1helpers.CachedSecretGetter(b.kubeClient.CoreV1(), b.kubeInformers) + podClient := b.kubeClient.CoreV1() + eventsClient := b.kubeClient.CoreV1() + operandInformers := b.kubeInformers.InformersFor(b.operandNamespace) + clusterInformers := b.kubeInformers.InformersFor("") + + var errs []error + + if len(b.operandNamespace) > 0 { + controllers.add(revisioncontroller.NewRevisionController( + b.operandNamespace, + b.revisionConfigMaps, + b.revisionSecrets, + operandInformers, + revisioncontroller.StaticPodLatestRevisionClient{StaticPodOperatorClient: b.staticPodOperatorClient}, + configMapClient, + secretClient, + eventRecorder, + )) + } else { + errs = append(errs, fmt.Errorf("missing revisionController; cannot proceed")) + } + + if len(b.installCommand) > 0 { + controllers.add(installer.NewInstallerController( + b.operandNamespace, + b.staticPodName, + b.revisionConfigMaps, + b.revisionSecrets, + b.installCommand, + operandInformers, + b.staticPodOperatorClient, + configMapClient, + secretClient, + podClient, + eventRecorder, + ).WithCerts( + b.certDir, + b.certConfigMaps, + b.certSecrets, + )) + + controllers.add(installerstate.NewInstallerStateController( + operandInformers, + podClient, + eventsClient, + b.staticPodOperatorClient, + b.operandNamespace, + eventRecorder, + )) + } else { + errs = append(errs, fmt.Errorf("missing installerController; cannot proceed")) + } + + if len(b.operandName) > 0 { + // TODO add handling for operator configmap changes to get version-mapping changes + controllers.add(staticpodstate.NewStaticPodStateController( + b.operandNamespace, + b.staticPodName, + b.operatorNamespace, + b.operandName, + operandInformers, + b.staticPodOperatorClient, + configMapClient, + podClient, + versionRecorder, + eventRecorder, + )) + } else { + eventRecorder.Warning("StaticPodStateControllerMissing", "not enough information provided, not all functionality is present") + } + + if len(b.pruneCommand) > 0 { + controllers.add(prune.NewPruneController( + b.operandNamespace, + b.staticPodPrefix, + b.pruneCommand, + configMapClient, + secretClient, + podClient, + b.staticPodOperatorClient, + eventRecorder, + )) + } else { + eventRecorder.Warning("PruningControllerMissing", "not enough information provided, not all functionality is present") + } + + controllers.add(node.NewNodeController( + b.staticPodOperatorClient, + clusterInformers, + eventRecorder, + )) + + controllers.add(backingresource.NewBackingResourceController( + b.operandNamespace, + b.staticPodOperatorClient, + operandInformers, + b.kubeClient, + eventRecorder, + )) + + if b.dynamicClient != nil && b.enableServiceMonitorController { + controllers.add(monitoring.NewMonitoringResourceController( + b.operandNamespace, + b.operandNamespace, + b.staticPodOperatorClient, + operandInformers, + b.kubeClient, + b.dynamicClient, + eventRecorder, + )) + } + + controllers.add(unsupportedconfigoverridescontroller.NewUnsupportedConfigOverridesController(b.staticPodOperatorClient, eventRecorder)) + controllers.add(loglevel.NewClusterOperatorLoggingController(b.staticPodOperatorClient, eventRecorder)) + + return controllers, errors.NewAggregate(errs) +} + +type staticPodOperatorControllers struct { + controllers []RunnableController +} + +func (o *staticPodOperatorControllers) add(controller RunnableController) { + o.controllers = append(o.controllers, controller) +} + +func (o *staticPodOperatorControllers) Run(ctx context.Context, workers int) { + for i := range o.controllers { + go o.controllers[i].Run(ctx, workers) + } + + 
<-ctx.Done() +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go new file mode 100644 index 00000000000..11b5216b2eb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go @@ -0,0 +1,359 @@ +package installerpod + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/klog" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourceread" + "github.com/openshift/library-go/pkg/operator/resource/retry" +) + +type InstallOptions struct { + // TODO replace with genericclioptions + KubeConfig string + KubeClient kubernetes.Interface + + Revision string + Namespace string + + PodConfigMapNamePrefix string + SecretNamePrefixes []string + OptionalSecretNamePrefixes []string + ConfigMapNamePrefixes []string + OptionalConfigMapNamePrefixes []string + + CertSecretNames []string + OptionalCertSecretNamePrefixes []string + CertConfigMapNamePrefixes []string + OptionalCertConfigMapNamePrefixes []string + + CertDir string + ResourceDir string + PodManifestDir string + + Timeout time.Duration + + PodMutationFns []PodMutationFunc +} + +// PodMutationFunc is a function that has a chance at changing the pod before it is created +type PodMutationFunc func(pod *corev1.Pod) error + +func NewInstallOptions() *InstallOptions { + return &InstallOptions{} +} + +func (o *InstallOptions) WithPodMutationFn(podMutationFn PodMutationFunc) *InstallOptions { + o.PodMutationFns = append(o.PodMutationFns, podMutationFn) + return o +} + +func NewInstaller() *cobra.Command { + o := NewInstallOptions() + + cmd := &cobra.Command{ + Use: "installer", + Short: "Install static pod and related resources", + Run: func(cmd *cobra.Command, args []string) { + klog.V(1).Info(cmd.Flags()) + klog.V(1).Info(spew.Sdump(o)) + + if err := o.Complete(); err != nil { + klog.Fatal(err) + } + if err := o.Validate(); err != nil { + klog.Fatal(err) + } + + ctx, cancel := context.WithTimeout(context.TODO(), o.Timeout) + defer cancel() + if err := o.Run(ctx); err != nil { + klog.Fatal(err) + } + }, + } + + o.AddFlags(cmd.Flags()) + + return cmd +} + +func (o *InstallOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty") + fs.StringVar(&o.Revision, "revision", o.Revision, "identifier for this particular installation instance. 
For example, a counter or a hash") + fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to retrieve all resources from and create the static pod in") + fs.StringVar(&o.PodConfigMapNamePrefix, "pod", o.PodConfigMapNamePrefix, "name of configmap that contains the pod to be created") + fs.StringSliceVar(&o.SecretNamePrefixes, "secrets", o.SecretNamePrefixes, "list of secret names to be included") + fs.StringSliceVar(&o.ConfigMapNamePrefixes, "configmaps", o.ConfigMapNamePrefixes, "list of configmaps to be included") + fs.StringSliceVar(&o.OptionalSecretNamePrefixes, "optional-secrets", o.OptionalSecretNamePrefixes, "list of optional secret names to be included") + fs.StringSliceVar(&o.OptionalConfigMapNamePrefixes, "optional-configmaps", o.OptionalConfigMapNamePrefixes, "list of optional configmaps to be included") + fs.StringVar(&o.ResourceDir, "resource-dir", o.ResourceDir, "directory for all files supporting the static pod manifest") + fs.StringVar(&o.PodManifestDir, "pod-manifest-dir", o.PodManifestDir, "directory for the static pod manifest") + fs.DurationVar(&o.Timeout, "timeout-duration", 120*time.Second, "maximum time in seconds to wait for the copying to complete (default: 2m)") + + fs.StringSliceVar(&o.CertSecretNames, "cert-secrets", o.CertSecretNames, "list of secret names to be included") + fs.StringSliceVar(&o.CertConfigMapNamePrefixes, "cert-configmaps", o.CertConfigMapNamePrefixes, "list of configmaps to be included") + fs.StringSliceVar(&o.OptionalCertSecretNamePrefixes, "optional-cert-secrets", o.OptionalCertSecretNamePrefixes, "list of optional secret names to be included") + fs.StringSliceVar(&o.OptionalCertConfigMapNamePrefixes, "optional-cert-configmaps", o.OptionalCertConfigMapNamePrefixes, "list of optional configmaps to be included") + fs.StringVar(&o.CertDir, "cert-dir", o.CertDir, "directory for all certs") +} + +func (o *InstallOptions) Complete() error { + clientConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfig, nil) + if err != nil { + return err + } + + // Use protobuf to fetch configmaps and secrets and create pods. 
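+ // Copying the config first leaves the caller's rest.Config untouched; protobuf is
+ // considerably cheaper than JSON to encode and decode for these built-in types, and
+ // JSON stays in AcceptContentTypes as a fallback for types without protobuf support.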
+ protoConfig := rest.CopyConfig(clientConfig) + protoConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoConfig.ContentType = "application/vnd.kubernetes.protobuf" + + o.KubeClient, err = kubernetes.NewForConfig(protoConfig) + if err != nil { + return err + } + return nil +} + +func (o *InstallOptions) Validate() error { + if len(o.Revision) == 0 { + return fmt.Errorf("--revision is required") + } + if len(o.Namespace) == 0 { + return fmt.Errorf("--namespace is required") + } + if len(o.PodConfigMapNamePrefix) == 0 { + return fmt.Errorf("--pod is required") + } + if len(o.ConfigMapNamePrefixes) == 0 { + return fmt.Errorf("--configmaps is required") + } + if o.Timeout == 0 { + return fmt.Errorf("--timeout-duration cannot be 0") + } + + if o.KubeClient == nil { + return fmt.Errorf("missing client") + } + + return nil +} + +func (o *InstallOptions) nameFor(prefix string) string { + return fmt.Sprintf("%s-%s", prefix, o.Revision) +} + +func (o *InstallOptions) prefixFor(name string) string { + return name[0 : len(name)-len(fmt.Sprintf("-%s", o.Revision))] +} + +func (o *InstallOptions) copySecretsAndConfigMaps(ctx context.Context, resourceDir string, + secretNames, optionalSecretNames, configNames, optionalConfigNames sets.String, prefixed bool) error { + klog.Infof("Creating target resource directory %q ...", resourceDir) + if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) { + return err + } + + // Gather secrets. If we get API server error, retry getting until we hit the timeout. + // Retrying will prevent temporary API server blips or networking issues. + // We return when all "required" secrets are gathered, optional secrets are not checked. + klog.Infof("Getting secrets ...") + secrets := []*corev1.Secret{} + for _, name := range append(secretNames.List(), optionalSecretNames.List()...) { + secret, err := o.getSecretWithRetry(ctx, name, optionalSecretNames.Has(name)) + if err != nil { + return err + } + // secret is nil means the secret was optional and we failed to get it. + if secret != nil { + secrets = append(secrets, secret) + } + } + + klog.Infof("Getting config maps ...") + configs := []*corev1.ConfigMap{} + for _, name := range append(configNames.List(), optionalConfigNames.List()...) { + config, err := o.getConfigMapWithRetry(ctx, name, optionalConfigNames.Has(name)) + if err != nil { + return err + } + // config is nil means the config was optional and we failed to get it. 
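+ // A required configmap that could not be fetched would already have returned an error above.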
+ if config != nil { + configs = append(configs, config) + } + } + + for _, secret := range secrets { + secretBaseName := secret.Name + if prefixed { + secretBaseName = o.prefixFor(secret.Name) + } + contentDir := path.Join(resourceDir, "secrets", secretBaseName) + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil { + return err + } + for filename, content := range secret.Data { + // TODO fix permissions + klog.Infof("Writing secret manifest %q ...", path.Join(contentDir, filename)) + if err := ioutil.WriteFile(path.Join(contentDir, filename), content, 0600); err != nil { + return err + } + } + } + for _, configmap := range configs { + configMapBaseName := configmap.Name + if prefixed { + configMapBaseName = o.prefixFor(configmap.Name) + } + contentDir := path.Join(resourceDir, "configmaps", configMapBaseName) + klog.Infof("Creating directory %q ...", contentDir) + if err := os.MkdirAll(contentDir, 0755); err != nil { + return err + } + for filename, content := range configmap.Data { + klog.Infof("Writing config file %q ...", path.Join(contentDir, filename)) + if err := ioutil.WriteFile(path.Join(contentDir, filename), []byte(content), 0644); err != nil { + return err + } + } + } + + return nil +} + +func (o *InstallOptions) copyContent(ctx context.Context) error { + resourceDir := path.Join(o.ResourceDir, o.nameFor(o.PodConfigMapNamePrefix)) + klog.Infof("Creating target resource directory %q ...", resourceDir) + if err := os.MkdirAll(resourceDir, 0755); err != nil && !os.IsExist(err) { + return err + } + + secretPrefixes := sets.NewString() + optionalSecretPrefixes := sets.NewString() + configPrefixes := sets.NewString() + optionalConfigPrefixes := sets.NewString() + for _, prefix := range o.SecretNamePrefixes { + secretPrefixes.Insert(o.nameFor(prefix)) + } + for _, prefix := range o.OptionalSecretNamePrefixes { + optionalSecretPrefixes.Insert(o.nameFor(prefix)) + } + for _, prefix := range o.ConfigMapNamePrefixes { + configPrefixes.Insert(o.nameFor(prefix)) + } + for _, prefix := range o.OptionalConfigMapNamePrefixes { + optionalConfigPrefixes.Insert(o.nameFor(prefix)) + } + if err := o.copySecretsAndConfigMaps(ctx, resourceDir, secretPrefixes, optionalSecretPrefixes, configPrefixes, optionalConfigPrefixes, true); err != nil { + return err + } + + // Copy the current state of the certs as we see them. 
This primes the cert directory once and allows a kube-apiserver to start. + if len(o.CertDir) > 0 { + if err := o.copySecretsAndConfigMaps(ctx, o.CertDir, + sets.NewString(o.CertSecretNames...), + sets.NewString(o.OptionalCertSecretNamePrefixes...), + sets.NewString(o.CertConfigMapNamePrefixes...), + sets.NewString(o.OptionalCertConfigMapNamePrefixes...), + false, + ); err != nil { + return err + } + } + + // Gather the pod YAML from its config map. + var podContent string + + err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { + klog.Infof("Getting pod configmaps/%s -n %s", o.nameFor(o.PodConfigMapNamePrefix), o.Namespace) + podConfigMap, err := o.KubeClient.CoreV1().ConfigMaps(o.Namespace).Get(o.nameFor(o.PodConfigMapNamePrefix), metav1.GetOptions{}) + if err != nil { + return false, err + } + podData, exists := podConfigMap.Data["pod.yaml"] + if !exists { + return true, fmt.Errorf("required 'pod.yaml' key does not exist in configmap") + } + podContent = strings.Replace(podData, "REVISION", o.Revision, -1) + return true, nil + }) + if err != nil { + return err + } + + // Write secrets, config maps, and the pod to disk. + // This does not need a timeout; instead, we should fail hard when we are unable to write. + + podFileName := o.PodConfigMapNamePrefix + ".yaml" + klog.Infof("Writing pod manifest %q ...", path.Join(resourceDir, podFileName)) + if err := ioutil.WriteFile(path.Join(resourceDir, podFileName), []byte(podContent), 0644); err != nil { + return err + } + + // copy static pod + klog.Infof("Creating directory for static pod manifest %q ...", o.PodManifestDir) + if err := os.MkdirAll(o.PodManifestDir, 0755); err != nil { + return err + } + + for _, fn := range o.PodMutationFns { + klog.V(2).Infof("Customizing static pod ...") + pod := resourceread.ReadPodV1OrDie([]byte(podContent)) + if err := fn(pod); err != nil { + return err + } + podContent = resourceread.WritePodV1OrDie(pod) + } + + klog.Infof("Writing static pod manifest %q ...\n%s", path.Join(o.PodManifestDir, podFileName), podContent) + if err := ioutil.WriteFile(path.Join(o.PodManifestDir, podFileName), []byte(podContent), 0644); err != nil { + return err + } + + return nil +} + +func (o *InstallOptions) Run(ctx context.Context) error { + var eventTarget *corev1.ObjectReference + + err := retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) { + var clientErr error + eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(o.KubeClient, o.Namespace, nil) + if clientErr != nil { + return false, clientErr + } + return true, nil + }) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + + recorder := events.NewRecorder(o.KubeClient.CoreV1().Events(o.Namespace), "static-pod-installer", eventTarget) + if err := o.copyContent(ctx); err != nil { + recorder.Warningf("StaticPodInstallerFailed", "Installing revision %s: %v", o.Revision, err) + return fmt.Errorf("failed to copy: %v", err) + } + + recorder.Eventf("StaticPodInstallerCompleted", "Successfully installed revision %s", o.Revision) + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd_test.go new file mode 100644 index 00000000000..d70d75cb1b6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd_test.go @@ -0,0 +1,221 @@ +package installerpod + +import ( + "context" + "io/ioutil" + "os" + "path" +
"reflect" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +const podYaml = ` +apiVersion: v1 +kind: Pod +metadata: + namespace: some-ns + name: kube-apiserver-pod +spec: +` + +func TestCopyContent(t *testing.T) { + tests := []struct { + name string + + o InstallOptions + client func() *fake.Clientset + + expectedErr string + expected func(t *testing.T, resourceDir, podDir string) + }{ + { + name: "basic", + o: InstallOptions{ + Revision: "006", + Namespace: "some-ns", + PodConfigMapNamePrefix: "kube-apiserver-pod", + SecretNamePrefixes: []string{"first", "second"}, + ConfigMapNamePrefixes: []string{"alpha", "bravo"}, + }, + client: func() *fake.Clientset { + return fake.NewSimpleClientset( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "first-006"}, + Data: map[string][]byte{ + "one-A.crt": []byte("one"), + "two-A.crt": []byte("two"), + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "second-006"}, + Data: map[string][]byte{ + "uno-B.crt": []byte("uno"), + "dos-B.crt": []byte("dos"), + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "alpha-006"}, + Data: map[string]string{ + "apple-A.crt": "apple", + "banana-A.crt": "banana", + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "bravo-006"}, + Data: map[string]string{ + "manzana-B.crt": "manzana", + "platano-B.crt": "platano", + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "kube-apiserver-pod-006"}, + Data: map[string]string{ + "pod.yaml": podYaml, + }, + }, + ) + }, + expected: func(t *testing.T, resourceDir, podDir string) { + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "first", "one-A.crt"), "one") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "first", "two-A.crt"), "two") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "second", "uno-B.crt"), "uno") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "second", "dos-B.crt"), "dos") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "alpha", "apple-A.crt"), "apple") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "alpha", "banana-A.crt"), "banana") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "bravo", "manzana-B.crt"), "manzana") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "bravo", "platano-B.crt"), "platano") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "kube-apiserver-pod.yaml"), podYaml) + checkFileContent(t, path.Join(podDir, "kube-apiserver-pod.yaml"), podYaml) + }, + }, + { + name: "optional-secrets-confmaps", + o: InstallOptions{ + Revision: "006", + Namespace: "some-ns", + PodConfigMapNamePrefix: "kube-apiserver-pod", + SecretNamePrefixes: []string{"first", "second"}, + OptionalSecretNamePrefixes: []string{"third", "fourth"}, + ConfigMapNamePrefixes: []string{"alpha", "bravo"}, + OptionalConfigMapNamePrefixes: []string{"charlie", "delta"}, + }, + client: func() *fake.Clientset { + return fake.NewSimpleClientset( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "first-006"}, + Data: map[string][]byte{ + "one-A.crt": []byte("one"), + "two-A.crt": 
[]byte("two"), + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "second-006"}, + Data: map[string][]byte{ + "uno-B.crt": []byte("uno"), + "dos-B.crt": []byte("dos"), + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "third-006"}, + Data: map[string][]byte{ + "tres-C.crt": []byte("tres"), + "cuatro-C.crt": []byte("cuatro"), + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "alpha-006"}, + Data: map[string]string{ + "apple-A.crt": "apple", + "banana-A.crt": "banana", + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "bravo-006"}, + Data: map[string]string{ + "manzana-B.crt": "manzana", + "platano-B.crt": "platano", + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "charlie-006"}, + Data: map[string]string{ + "apple-C.crt": "apple", + "banana-C.crt": "banana", + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "kube-apiserver-pod-006"}, + Data: map[string]string{ + "pod.yaml": podYaml, + }, + }, + ) + }, + expected: func(t *testing.T, resourceDir, podDir string) { + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "first", "one-A.crt"), "one") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "first", "two-A.crt"), "two") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "second", "uno-B.crt"), "uno") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "second", "dos-B.crt"), "dos") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "third", "tres-C.crt"), "tres") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "secrets", "third", "cuatro-C.crt"), "cuatro") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "alpha", "apple-A.crt"), "apple") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "alpha", "banana-A.crt"), "banana") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "bravo", "manzana-B.crt"), "manzana") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "bravo", "platano-B.crt"), "platano") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "charlie", "apple-C.crt"), "apple") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "configmaps", "charlie", "banana-C.crt"), "banana") + checkFileContent(t, path.Join(resourceDir, "kube-apiserver-pod-006", "kube-apiserver-pod.yaml"), podYaml) + checkFileContent(t, path.Join(podDir, "kube-apiserver-pod.yaml"), podYaml) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testDir, err := ioutil.TempDir("", "copy-content-test") + if err != nil { + t.Fatal(err) + } + defer func() { + os.Remove(testDir) + }() + + o := test.o + o.KubeClient = test.client() + o.ResourceDir = path.Join(testDir, "resources") + o.PodManifestDir = path.Join(testDir, "static-pods") + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + err = o.copyContent(ctx) + switch { + case err == nil && len(test.expectedErr) == 0: + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && 
!strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err.Error()) + } + test.expected(t, o.ResourceDir, o.PodManifestDir) + }) + } +} + +func checkFileContent(t *testing.T, file, expected string) { + actual, err := ioutil.ReadFile(file) + if err != nil { + t.Error(err) + return + } + + if expected != string(actual) { + t.Errorf("%q: expected %q, got %q", file, expected, string(actual)) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go new file mode 100644 index 00000000000..390763b8839 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy.go @@ -0,0 +1,67 @@ +package installerpod + +import ( + "golang.org/x/net/context" + "k8s.io/klog" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/library-go/pkg/operator/resource/retry" +) + +// getSecretWithRetry attempts to get the secret from the API server, retrying on any connection error +// until the context is done, the secret is returned, or an HTTP client error is returned. +// If the optional flag is set, a 404 error is not reported and a nil object is returned instead. +func (o *InstallOptions) getSecretWithRetry(ctx context.Context, name string, isOptional bool) (*v1.Secret, error) { + var secret *v1.Secret + + err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { + var clientErr error + secret, clientErr = o.KubeClient.CoreV1().Secrets(o.Namespace).Get(name, metav1.GetOptions{}) + if clientErr != nil { + klog.Infof("Failed to get secret %s/%s: %v", o.Namespace, name, clientErr) + return false, clientErr + } + return true, nil + }) + + switch { + case err == nil: + klog.Infof("Got secret %s/%s", o.Namespace, name) + return secret, nil + case errors.IsNotFound(err) && isOptional: + return nil, nil + default: + return nil, err + } +} + +// getConfigMapWithRetry attempts to get the configmap from the API server, retrying on any connection error +// until the context is done, the configmap is returned, or an HTTP client error is returned. +// If the optional flag is set, a 404 error is not reported and a nil object is returned instead.
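+// For example, a caller with a deadline might fetch a required configmap like this (an illustrative sketch; +// the configmap name is hypothetical): +// +// ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) +// defer cancel() +// cm, err := o.getConfigMapWithRetry(ctx, "kube-apiserver-pod-6", false) // required, so a 404 comes back as an error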
+func (o *InstallOptions) getConfigMapWithRetry(ctx context.Context, name string, isOptional bool) (*v1.ConfigMap, error) { + var config *v1.ConfigMap + + err := retry.RetryOnConnectionErrors(ctx, func(ctx context.Context) (bool, error) { + var clientErr error + config, clientErr = o.KubeClient.CoreV1().ConfigMaps(o.Namespace).Get(name, metav1.GetOptions{}) + if clientErr != nil { + klog.Infof("Failed to get config map %s/%s: %v", o.Namespace, name, clientErr) + return false, clientErr + } + return true, nil + }) + + switch { + case err == nil: + klog.Infof("Got configMap %s/%s", o.Namespace, name) + return config, nil + case errors.IsNotFound(err) && isOptional: + return nil, nil + default: + return nil, err + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy_test.go new file mode 100644 index 00000000000..c24b7a37c86 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/copy_test.go @@ -0,0 +1,135 @@ +package installerpod + +import ( + "context" + "fmt" + "testing" + "time" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + ktesting "k8s.io/client-go/testing" +) + +func addFakeSecret(name string) *v1.Secret { + secret := &v1.Secret{} + secret.Name = name + secret.Namespace = "test" + return secret +} + +func TestGetSecretWithRetry(t *testing.T) { + tests := []struct { + name string + getSecretName string + secrets []runtime.Object + optional bool + expectErr bool + expectRetries bool + sendInternalError bool + }{ + { + name: "required secret exists", + secrets: []runtime.Object{addFakeSecret("test-secret")}, + getSecretName: "test-secret", + }, + { + name: "optional secret does not exist and we do not expect an error", + secrets: []runtime.Object{}, + getSecretName: "test-secret", + optional: true, + expectRetries: false, + expectErr: false, + }, + { + name: "required secret does not exist and we do not retry on a not-found error", + secrets: []runtime.Object{}, + getSecretName: "test-secret", + expectRetries: false, + expectErr: true, + }, + { + name: "required secret exists and we retry on internal error", + secrets: []runtime.Object{addFakeSecret("test-secret")}, + getSecretName: "test-secret", + expectRetries: true, + sendInternalError: true, + expectErr: false, + }, + { + name: "optional secret does not exist and we retry on internal error", + secrets: []runtime.Object{}, + getSecretName: "test-secret", + optional: true, + sendInternalError: true, + expectRetries: true, + expectErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client := fake.NewSimpleClientset(test.secrets...) + ctx := context.TODO() + internalErrorChan := make(chan struct{}) + + client.PrependReactor("get", "secrets", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { + getAction := action.(ktesting.GetAction) + if getAction.GetName() != test.getSecretName { + return false, nil, nil + } + + // Send a 500 error. Closing the channel signals the goroutine below to remove this reactor from the reaction chain.
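+ // (PrependReactor placed this reactor ahead of the fixture's default object tracker, so it sees the + // get first; returning handled=false passes the call through to the tracker.)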
+ if test.sendInternalError { + close(internalErrorChan) + return true, nil, errors.NewInternalError(fmt.Errorf("test")) + } else { + return false, nil, nil + } + }) + timeoutContext, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + ctx = timeoutContext + + options := &InstallOptions{KubeClient: client, Namespace: "test"} + + // If the test sends an internal error, wait until it has been sent and then remove the reactor + // immediately. This should cause the client to retry, and we then observe those retries in the recorded actions. + if test.sendInternalError { + go func(c *fake.Clientset) { + <-internalErrorChan + c.Lock() + defer c.Unlock() + // Drop only the prepended reactor so the default object tracker keeps serving requests. + c.ReactionChain = c.ReactionChain[1:] + }(client) + } + + _, err := options.getSecretWithRetry(ctx, test.getSecretName, test.optional) + switch { + case err != nil && !test.expectErr: + t.Errorf("unexpected error: %v", err) + return + case err == nil && test.expectErr: + t.Errorf("expected error, got none") + return + } + + // Start at -1 so that a single request counts as zero retries (which is fine when we don't expect any). + retries := -1 + for _, action := range client.Actions() { + if action.GetVerb() != "get" || action.GetResource().Resource != "secrets" { + continue + } + retries++ + } + switch { + case retries > 0 && !test.expectRetries: + t.Errorf("expected no retries, but got %d retries", retries) + case retries == 0 && test.expectRetries: + t.Error("expected retries, but got none") + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go new file mode 100644 index 00000000000..18ba33178b1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd.go @@ -0,0 +1,116 @@ +package prune + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strconv" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/util/sets" +) + +type PruneOptions struct { + MaxEligibleRevision int + ProtectedRevisions []int + + ResourceDir string + StaticPodName string +} + +func NewPruneOptions() *PruneOptions { + return &PruneOptions{} +} + +func NewPrune() *cobra.Command { + o := NewPruneOptions() + + cmd := &cobra.Command{ + Use: "prune", + Short: "Prune static pod installer revisions", + Run: func(cmd *cobra.Command, args []string) { + klog.V(1).Info(cmd.Flags()) + klog.V(1).Info(spew.Sdump(o)) + + if err := o.Validate(); err != nil { + klog.Fatal(err) + } + if err := o.Run(); err != nil { + klog.Fatal(err) + } + }, + } + + o.AddFlags(cmd.Flags()) + + return cmd +} + +func (o *PruneOptions) AddFlags(fs *pflag.FlagSet) { + fs.IntVar(&o.MaxEligibleRevision, "max-eligible-revision", o.MaxEligibleRevision, "highest revision ID to be eligible for pruning") + fs.IntSliceVar(&o.ProtectedRevisions, "protected-revisions", o.ProtectedRevisions, "list of revision IDs to preserve (not delete)") + fs.StringVar(&o.ResourceDir, "resource-dir", o.ResourceDir, "directory for all files supporting the static pod manifest") + fs.StringVar(&o.StaticPodName, "static-pod-name", o.StaticPodName, "name of the static pod") +} + +func (o *PruneOptions) Validate() error { + if len(o.ResourceDir) == 0 { + return fmt.Errorf("--resource-dir is required") + } + if o.MaxEligibleRevision == 0 { + return fmt.Errorf("--max-eligible-revision is required") + } + if len(o.StaticPodName) == 0 { + return
fmt.Errorf("--static-pod-name is required") + } + + return nil +} + +func (o *PruneOptions) Run() error { + protectedIDs := sets.NewInt(o.ProtectedRevisions...) + + files, err := ioutil.ReadDir(o.ResourceDir) + if err != nil { + return err + } + + for _, file := range files { + // If the file is not a resource directory... + if !file.IsDir() { + continue + } + // And doesn't match our static pod prefix... + if !strings.HasPrefix(file.Name(), o.StaticPodName) { + continue + } + + // Split file name to get just the integer revision ID + fileSplit := strings.Split(file.Name(), o.StaticPodName+"-") + revisionID, err := strconv.Atoi(fileSplit[len(fileSplit)-1]) + if err != nil { + return err + } + + // And is not protected... + if protected := protectedIDs.Has(revisionID); protected { + continue + } + // And is less than or equal to the maxEligibleRevisionID + if revisionID > o.MaxEligibleRevision { + continue + } + + err = os.RemoveAll(path.Join(o.ResourceDir, file.Name())) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd_test.go new file mode 100644 index 00000000000..3e06a1a4f9a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/prune/cmd_test.go @@ -0,0 +1,103 @@ +package prune + +import ( + "io/ioutil" + "os" + "path" + "reflect" + "sort" + "testing" + + "vbom.ml/util/sortorder" +) + +func TestRun(t *testing.T) { + tests := []struct { + name string + o PruneOptions + files []string + expected []string + }{ + { + name: "only deletes non-protected revisions of the specified pod", + o: PruneOptions{ + MaxEligibleRevision: 3, + ProtectedRevisions: []int{3, 2}, + StaticPodName: "test", + }, + files: []string{"test-1", "test-2", "test-3", "othertest-4"}, + expected: []string{"test-2", "test-3", "othertest-4"}, + }, + { + name: "doesn't delete anything higher than highest eligible revision", + o: PruneOptions{ + MaxEligibleRevision: 2, + ProtectedRevisions: []int{2}, + StaticPodName: "test", + }, + files: []string{"test-1", "test-2", "test-3"}, + expected: []string{"test-2", "test-3"}, + }, + { + name: "revision numbers do not conflict between pods when detecting protected IDs", + o: PruneOptions{ + MaxEligibleRevision: 2, + ProtectedRevisions: []int{2}, + StaticPodName: "test", + }, + files: []string{"test-1", "test-2", "othertest-1", "othertest-2"}, + expected: []string{"test-2", "othertest-1", "othertest-2"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + testDir, err := ioutil.TempDir("", "prune-revisions-test") + if err != nil { + t.Fatal(err) + } + defer func() { + os.Remove(testDir) + }() + + resourceDir := path.Join(testDir, "resources") + err = os.Mkdir(resourceDir, os.ModePerm) + if err != nil { + t.Error(err) + } + for _, file := range test.files { + err = os.Mkdir(path.Join(resourceDir, file), os.ModePerm) + if err != nil { + t.Error(err) + } + } + + o := test.o + o.ResourceDir = resourceDir + + err = o.Run() + if err != nil { + t.Error(err) + } + checkPruned(t, o.ResourceDir, test.expected) + }) + } +} + +func checkPruned(t *testing.T, resourceDir string, expected []string) { + files, err := ioutil.ReadDir(resourceDir) + if err != nil { + t.Error(err) + } + actual := make([]string, 0, len(files)) + for _, file := range files { + actual = append(actual, file.Name()) + } + + sort.Sort(sortorder.Natural(expected)) + 
sort.Sort(sortorder.Natural(actual)) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("expected %+v, got %+v", expected, actual) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go new file mode 100644 index 00000000000..f8dde57b9ae --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go @@ -0,0 +1,140 @@ +package status + +import ( + "fmt" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// unionCondition returns a single cluster operator condition that is the union of multiple operator conditions. +func unionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return internalUnionCondition(conditionType, defaultConditionStatus, false, allConditions...) +} + +// unionInertialCondition returns a single cluster operator condition that is the union of multiple operator conditions, +// but resists returning a condition with a status opposite the defaultConditionStatus. +func unionInertialCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return internalUnionCondition(conditionType, defaultConditionStatus, true, allConditions...) +} + +// internalUnionCondition returns a single cluster operator condition that is the union of multiple operator conditions. +// +// defaultConditionStatus indicates whether you want to merge all Falses or merge all Trues. For instance, Degraded merges +// on True, but Available merges on False. Think of it as an anti-default. +// +// If hasInertia, then resist returning a condition with a status opposite the defaultConditionStatus. +func internalUnionCondition(conditionType string, defaultConditionStatus operatorv1.ConditionStatus, hasInertia bool, allConditions ...operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + var oppositeConditionStatus operatorv1.ConditionStatus + if defaultConditionStatus == operatorv1.ConditionTrue { + oppositeConditionStatus = operatorv1.ConditionFalse + } else { + oppositeConditionStatus = operatorv1.ConditionTrue + } + + interestingConditions := []operatorv1.OperatorCondition{} + badConditions := []operatorv1.OperatorCondition{} + badConditionStatus := operatorv1.ConditionUnknown + for _, condition := range allConditions { + if strings.HasSuffix(condition.Type, conditionType) { + interestingConditions = append(interestingConditions, condition) + + if condition.Status != defaultConditionStatus { + badConditions = append(badConditions, condition) + if condition.Status == oppositeConditionStatus { + badConditionStatus = oppositeConditionStatus + } + } + } + } + + unionedCondition := operatorv1.OperatorCondition{Type: conditionType, Status: operatorv1.ConditionUnknown} + if len(interestingConditions) == 0 { + unionedCondition.Status = operatorv1.ConditionUnknown + unionedCondition.Reason = "NoData" + return OperatorConditionToClusterOperatorCondition(unionedCondition) + } + + // This timeout needs to be longer than the delay in kube-apiserver after setting not ready and before we stop serving. + // That delay used to be 30 seconds, but we switched it to 70 seconds to reflect the reality on AWS.
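+ // Example (illustrative): with hasInertia set, a Degraded-type condition that went bad 30 seconds ago is still + // reported at the default status here; the union only flips once the oldest bad condition has been bad for more + // than two minutes.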
+ twoMinutesAgo := time.Now().Add(-2 * time.Minute) + earliestBadConditionNotOldEnough := earliestTransitionTime(badConditions).Time.After(twoMinutesAgo) + if len(badConditions) == 0 || (hasInertia && earliestBadConditionNotOldEnough) { + unionedCondition.Status = defaultConditionStatus + unionedCondition.Message = unionMessage(interestingConditions) + unionedCondition.Reason = "AsExpected" + unionedCondition.LastTransitionTime = latestTransitionTime(interestingConditions) + + return OperatorConditionToClusterOperatorCondition(unionedCondition) + } + + // at this point we have bad conditions + unionedCondition.Status = badConditionStatus + unionedCondition.Message = unionMessage(badConditions) + unionedCondition.Reason = unionReason(badConditions) + unionedCondition.LastTransitionTime = latestTransitionTime(badConditions) + + return OperatorConditionToClusterOperatorCondition(unionedCondition) +} + +func latestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time { + latestTransitionTime := metav1.Time{} + for _, condition := range conditions { + if latestTransitionTime.Before(&condition.LastTransitionTime) { + latestTransitionTime = condition.LastTransitionTime + } + } + return latestTransitionTime +} + +func earliestTransitionTime(conditions []operatorv1.OperatorCondition) metav1.Time { + earliestTransitionTime := metav1.Now() + for _, condition := range conditions { + if !earliestTransitionTime.Before(&condition.LastTransitionTime) { + earliestTransitionTime = condition.LastTransitionTime + } + } + return earliestTransitionTime +} + +func uniq(s []string) []string { + seen := make(map[string]struct{}, len(s)) + j := 0 + for _, v := range s { + if _, ok := seen[v]; ok { + continue + } + seen[v] = struct{}{} + s[j] = v + j++ + } + return s[:j] +} + +func unionMessage(conditions []operatorv1.OperatorCondition) string { + messages := []string{} + for _, condition := range conditions { + if len(condition.Message) == 0 { + continue + } + for _, message := range uniq(strings.Split(condition.Message, "\n")) { + messages = append(messages, fmt.Sprintf("%s: %s", condition.Type, message)) + } + } + return strings.Join(messages, "\n") +} + +func unionReason(conditions []operatorv1.OperatorCondition) string { + if len(conditions) == 1 { + if len(conditions[0].Reason) != 0 { + return conditions[0].Type + conditions[0].Reason + } + return conditions[0].Type + } else { + return "MultipleConditionsMatching" + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go new file mode 100644 index 00000000000..dbbf1f06e67 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go @@ -0,0 +1,252 @@ +package status + +import ( + "context" + "fmt" + "strings" + "time" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers 
"github.com/openshift/client-go/config/listers/config/v1" + + configv1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +var workQueueKey = "instance" + +type VersionGetter interface { + // SetVersion is a way to set the version for an operand. It must be thread-safe + SetVersion(operandName, version string) + // GetVersion is way to get the versions for all operands. It must be thread-safe and return an object that doesn't mutate + GetVersions() map[string]string + // VersionChangedChannel is a channel that will get an item whenever SetVersion has been called + VersionChangedChannel() <-chan struct{} +} + +type StatusSyncer struct { + clusterOperatorName string + relatedObjects []configv1.ObjectReference + + versionGetter VersionGetter + operatorClient operatorv1helpers.OperatorClient + clusterOperatorClient configv1client.ClusterOperatorsGetter + clusterOperatorLister configv1listers.ClusterOperatorLister + + cachesToSync []cache.InformerSynced + queue workqueue.RateLimitingInterface + eventRecorder events.Recorder +} + +func NewClusterOperatorStatusController( + name string, + relatedObjects []configv1.ObjectReference, + clusterOperatorClient configv1client.ClusterOperatorsGetter, + clusterOperatorInformer configv1informers.ClusterOperatorInformer, + operatorClient operatorv1helpers.OperatorClient, + versionGetter VersionGetter, + recorder events.Recorder, +) *StatusSyncer { + c := &StatusSyncer{ + clusterOperatorName: name, + relatedObjects: relatedObjects, + versionGetter: versionGetter, + clusterOperatorClient: clusterOperatorClient, + clusterOperatorLister: clusterOperatorInformer.Lister(), + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("status-controller"), + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StatusSyncer_"+strings.Replace(name, "-", "_", -1)), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + clusterOperatorInformer.Informer().AddEventHandler(c.eventHandler()) + + c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced) + c.cachesToSync = append(c.cachesToSync, clusterOperatorInformer.Informer().HasSynced) + + return c +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. 
+func (c StatusSyncer) sync() error { + detailedSpec, currentDetailedStatus, _, err := c.operatorClient.GetOperatorState() + if apierrors.IsNotFound(err) { + c.eventRecorder.Warningf("StatusNotFound", "Unable to determine current operator status for clusteroperator/%s", c.clusterOperatorName) + if err := c.clusterOperatorClient.ClusterOperators().Delete(c.clusterOperatorName, nil); err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil + } + if err != nil { + return err + } + + originalClusterOperatorObj, err := c.clusterOperatorLister.Get(c.clusterOperatorName) + if err != nil && !apierrors.IsNotFound(err) { + c.eventRecorder.Warningf("StatusFailed", "Unable to get current operator status for clusteroperator/%s: %v", c.clusterOperatorName, err) + return err + } + + // ensure that we have a clusteroperator resource + if originalClusterOperatorObj == nil || apierrors.IsNotFound(err) { + klog.Infof("clusteroperator/%s not found", c.clusterOperatorName) + var createErr error + originalClusterOperatorObj, createErr = c.clusterOperatorClient.ClusterOperators().Create(&configv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{Name: c.clusterOperatorName}, + }) + if apierrors.IsNotFound(createErr) { + // this means that the API isn't present. We did not fail. Try again later. + klog.Infof("ClusterOperator API not created") + c.queue.AddRateLimited(workQueueKey) + return nil + } + if createErr != nil { + c.eventRecorder.Warningf("StatusCreateFailed", "Failed to create operator status: %v", createErr) + return createErr + } + } + clusterOperatorObj := originalClusterOperatorObj.DeepCopy() + + if detailedSpec.ManagementState == operatorv1.Unmanaged && !management.IsOperatorAlwaysManaged() { + clusterOperatorObj.Status = configv1.ClusterOperatorStatus{} + + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorAvailable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorProgressing, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorDegraded, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, configv1.ClusterOperatorStatusCondition{Type: configv1.OperatorUpgradeable, Status: configv1.ConditionUnknown, Reason: "Unmanaged"}) + + if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) { + return nil + } + if _, updateErr := c.clusterOperatorClient.ClusterOperators().UpdateStatus(clusterOperatorObj); updateErr != nil { + return updateErr + } + c.eventRecorder.Eventf("OperatorStatusChanged", "Status for operator %s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status)) + return nil + } + + clusterOperatorObj.Status.RelatedObjects = c.relatedObjects + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionInertialCondition("Degraded", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Progressing", operatorv1.ConditionFalse, currentDetailedStatus.Conditions...)) +
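+ // (Degraded and Progressing union with a default of False, so any True condition is "bad"; Available and + // Upgradeable union with a default of True, so any False condition is "bad". Degraded additionally gets the + // two-minute inertia implemented in condition.go.)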
configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Available", operatorv1.ConditionTrue, currentDetailedStatus.Conditions...)) + configv1helpers.SetStatusCondition(&clusterOperatorObj.Status.Conditions, unionCondition("Upgradeable", operatorv1.ConditionTrue, currentDetailedStatus.Conditions...)) + + // TODO work out removal. We don't always know the existing value, so removing early seems like a bad idea. Perhaps a remove flag. + versions := c.versionGetter.GetVersions() + for operand, version := range versions { + previousVersion := operatorv1helpers.SetOperandVersion(&clusterOperatorObj.Status.Versions, configv1.OperandVersion{Name: operand, Version: version}) + if previousVersion != version { + // This event is a marker in the event stream for when the operator updated, as opposed to when the operand updated. + c.eventRecorder.Eventf("OperatorVersionChanged", "clusteroperator/%s version %q changed from %q to %q", c.clusterOperatorName, operand, previousVersion, version) + } + } + + // if we have no diff, just return + if equality.Semantic.DeepEqual(clusterOperatorObj, originalClusterOperatorObj) { + return nil + } + klog.V(2).Infof("clusteroperator/%s diff %v", c.clusterOperatorName, resourceapply.JSONPatchNoError(originalClusterOperatorObj, clusterOperatorObj)) + + if _, updateErr := c.clusterOperatorClient.ClusterOperators().UpdateStatus(clusterOperatorObj); updateErr != nil { + return updateErr + } + c.eventRecorder.Eventf("OperatorStatusChanged", "Status for clusteroperator/%s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status)) + return nil +} + +func OperatorConditionToClusterOperatorCondition(condition operatorv1.OperatorCondition) configv1.ClusterOperatorStatusCondition { + return configv1.ClusterOperatorStatusCondition{ + Type: configv1.ClusterStatusConditionType(condition.Type), + Status: configv1.ConditionStatus(condition.Status), + LastTransitionTime: condition.LastTransitionTime, + Reason: condition.Reason, + Message: condition.Message, + } +} + +func (c *StatusSyncer) Run(ctx context.Context, workers int) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting StatusSyncer-" + c.clusterOperatorName) + defer klog.Infof("Shutting down StatusSyncer-" + c.clusterOperatorName) + if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) { + return + } + + // start watching for version changes + go c.watchVersionGetter(ctx.Done()) + + // doesn't matter what workers say, only start one.
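+ // A single worker is enough here: everything is queued under the one workQueueKey, so a lone worker keeps the + // status updates serialized.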
+ go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *StatusSyncer) watchVersionGetter(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + versionCh := c.versionGetter.VersionChangedChannel() + // always kick at least once + c.queue.Add(workQueueKey) + + for { + select { + case <-stopCh: + return + case <-versionCh: + c.queue.Add(workQueueKey) + } + } +} + +func (c *StatusSyncer) runWorker(_ context.Context) { + for c.processNextWorkItem() { + } +} + +func (c *StatusSyncer) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *StatusSyncer) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go new file mode 100644 index 00000000000..484697da401 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go @@ -0,0 +1,321 @@ +package status + +import ( + "reflect" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/client-go/config/clientset/versioned/fake" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" + + "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" + "github.com/openshift/library-go/pkg/operator/events" +) + +func TestDegraded(t *testing.T) { + + threeMinutesAgo := metav1.NewTime(time.Now().Add(-3 * time.Minute)) + fiveSecondsAgo := metav1.NewTime(time.Now().Add(-5 * time.Second)) + yesterday := metav1.NewTime(time.Now().Add(-24 * time.Hour)) + + testCases := []struct { + name string + conditions []operatorv1.OperatorCondition + expectedType configv1.ClusterStatusConditionType + expectedStatus configv1.ConditionStatus + expectedMessages []string + expectedReason string + }{ + { + name: "no data", + conditions: []operatorv1.OperatorCondition{}, + expectedStatus: configv1.ConditionUnknown, + expectedReason: "NoData", + }, + { + name: "one not failing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeADegraded: a message from type a", + }, + }, + { + name: "one not failing/beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type a"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeADegraded: a message from
type a", + }, + }, + { + name: "one failing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeADegraded: a message from type a", + }, + }, + { + name: "one failing/beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, Message: "a message from type a", LastTransitionTime: threeMinutesAgo}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeADegraded", + expectedMessages: []string{ + "TypeADegraded: a message from type a", + }, + }, + { + name: "two present/one failing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeADegraded: a message from type a", + }, + }, + { + name: "two present/one failing/beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type a"}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeADegraded", + expectedMessages: []string{ + "TypeADegraded: a message from type a", + }, + }, + { + name: "two present/second one failing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + }, + }, + { + name: "two present/second one failing/beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type b"}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeBDegraded", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + }, + }, + { + name: "many present/some failing/all within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type d"}, + }, + expectedStatus: configv1.ConditionFalse, + expectedReason: "AsExpected", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + "TypeBDegraded: 
another message from type b", + "TypeCDegraded: a message from type c", + "TypeDDegraded: a message from type d", + }, + }, + { + name: "many present/some failing some/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type d"}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "MultipleConditionsMatching", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + "TypeBDegraded: another message from type b", + "TypeDDegraded: a message from type d", + }, + }, + { + name: "many present/some failing/all beyond threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type d"}, + }, + expectedStatus: configv1.ConditionTrue, + expectedReason: "MultipleConditionsMatching", + expectedMessages: []string{ + "TypeBDegraded: a message from type b", + "TypeBDegraded: another message from type b", + "TypeDDegraded: a message from type d", + }, + }, + { + name: "one progressing/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeAProgressing", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedType: configv1.OperatorProgressing, + expectedStatus: configv1.ConditionTrue, + expectedReason: "TypeAProgressing", + expectedMessages: []string{ + "TypeAProgressing: a message from type a", + }, + }, + { + name: "one not available/within threshold", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeAAvailable", Status: operatorv1.ConditionFalse, LastTransitionTime: fiveSecondsAgo, Message: "a message from type a"}, + }, + expectedType: configv1.OperatorAvailable, + expectedStatus: configv1.ConditionFalse, + expectedReason: "TypeAAvailable", + expectedMessages: []string{ + "TypeAAvailable: a message from type a", + }, + }, + { + name: "two present/one available/one unknown", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeAAvailable", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a is great"}, + {Type: "TypeBAvailable", Status: operatorv1.ConditionUnknown, LastTransitionTime: fiveSecondsAgo, Message: "b is confused"}, + }, + expectedType: configv1.OperatorAvailable, + expectedStatus: configv1.ConditionUnknown, + expectedReason: "TypeBAvailable", + expectedMessages: []string{ + "TypeBAvailable: b is confused", + }, + }, + { + name: "two present/one unavailable/one unknown", + conditions: []operatorv1.OperatorCondition{ + {Type: "TypeAAvailable", Status: operatorv1.ConditionFalse, LastTransitionTime: fiveSecondsAgo, 
Message: "a is bad"}, + {Type: "TypeBAvailable", Status: operatorv1.ConditionUnknown, LastTransitionTime: fiveSecondsAgo, Message: "b is confused"}, + }, + expectedType: configv1.OperatorAvailable, + expectedStatus: configv1.ConditionFalse, + expectedReason: "MultipleConditionsMatching", + expectedMessages: []string{ + "TypeAAvailable: a is bad", + "TypeBAvailable: b is confused", + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + for _, condition := range tc.conditions { + if condition.LastTransitionTime == (metav1.Time{}) { + t.Fatal("LastTransitionTime not set.") + } + } + if tc.expectedType == "" { + tc.expectedType = configv1.OperatorDegraded + } + clusterOperator := &configv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{Name: "OPERATOR_NAME", ResourceVersion: "12"}, + } + clusterOperatorClient := fake.NewSimpleClientset(clusterOperator) + + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + indexer.Add(clusterOperator) + + statusClient := &statusClient{ + t: t, + status: operatorv1.OperatorStatus{ + Conditions: tc.conditions, + }, + } + controller := &StatusSyncer{ + clusterOperatorName: "OPERATOR_NAME", + clusterOperatorClient: clusterOperatorClient.ConfigV1(), + clusterOperatorLister: configv1listers.NewClusterOperatorLister(indexer), + operatorClient: statusClient, + eventRecorder: events.NewInMemoryRecorder("status"), + versionGetter: NewVersionGetter(), + } + if err := controller.sync(); err != nil { + t.Errorf("unexpected sync error: %v", err) + return + } + result, _ := clusterOperatorClient.ConfigV1().ClusterOperators().Get("OPERATOR_NAME", metav1.GetOptions{}) + + var expectedCondition *configv1.ClusterOperatorStatusCondition + if tc.expectedStatus != "" { + expectedCondition = &configv1.ClusterOperatorStatusCondition{ + Type: tc.expectedType, + Status: configv1.ConditionStatus(string(tc.expectedStatus)), + } + if len(tc.expectedMessages) > 0 { + expectedCondition.Message = strings.Join(tc.expectedMessages, "\n") + } + if len(tc.expectedReason) > 0 { + expectedCondition.Reason = tc.expectedReason + } + } + + for i := range result.Status.Conditions { + result.Status.Conditions[i].LastTransitionTime = metav1.Time{} + } + + actual := v1helpers.FindStatusCondition(result.Status.Conditions, tc.expectedType) + if !reflect.DeepEqual(expectedCondition, actual) { + t.Error(diff.ObjectDiff(expectedCondition, actual)) + } + }) + } +} + +// OperatorStatusProvider +type statusClient struct { + t *testing.T + spec operatorv1.OperatorSpec + status operatorv1.OperatorStatus +} + +func (c *statusClient) Informer() cache.SharedIndexInformer { + c.t.Log("Informer called") + return nil +} + +func (c *statusClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.spec, &c.status, "", nil +} + +func (c *statusClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("missing") +} + +func (c *statusClient) UpdateOperatorStatus(string, *operatorv1.OperatorStatus) (status *operatorv1.OperatorStatus, err error) { + panic("missing") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go new file mode 100644 index 00000000000..3f3fcec9495 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go @@ -0,0 +1,90 @@ +package 
status + +import ( + "os" + "sync" + + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + "github.com/openshift/library-go/pkg/operator/events" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type versionGetter struct { + lock sync.Mutex + versions map[string]string + notificationChannels []chan struct{} +} + +const ( + operandImageVersionEnvVarName = "OPERAND_IMAGE_VERSION" + operatorImageVersionEnvVarName = "OPERATOR_IMAGE_VERSION" +) + +func NewVersionGetter() VersionGetter { + return &versionGetter{ + versions: map[string]string{}, + } +} + +func (v *versionGetter) SetVersion(operandName, version string) { + v.lock.Lock() + defer v.lock.Unlock() + + v.versions[operandName] = version + + for i := range v.notificationChannels { + ch := v.notificationChannels[i] + // don't let a slow consumer block the rest + go func() { + ch <- struct{}{} + }() + } +} + +func (v *versionGetter) GetVersions() map[string]string { + v.lock.Lock() + defer v.lock.Unlock() + + ret := map[string]string{} + for k, v := range v.versions { + ret[k] = v + } + return ret +} + +func (v *versionGetter) VersionChangedChannel() <-chan struct{} { + v.lock.Lock() + defer v.lock.Unlock() + + channel := make(chan struct{}, 50) + v.notificationChannels = append(v.notificationChannels, channel) + return channel +} + +func VersionForOperandFromEnv() string { + return os.Getenv(operandImageVersionEnvVarName) +} + +func VersionForOperatorFromEnv() string { + return os.Getenv(operatorImageVersionEnvVarName) +} + +func VersionForOperand(namespace, imagePullSpec string, configMapGetter corev1client.ConfigMapsGetter, eventRecorder events.Recorder) string { + versionMap := map[string]string{} + versionMapping, err := configMapGetter.ConfigMaps(namespace).Get("version-mapping", metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + eventRecorder.Warningf("VersionMappingFailure", "unable to get version mapping: %v", err) + return "" + } + if versionMapping != nil { + for version, image := range versionMapping.Data { + versionMap[image] = version + } + } + + // The "version-mapping" data maps version -> image pull spec; having inverted it above, look up the + // version recorded for this operand's image. + operandVersion := versionMap[imagePullSpec] + return operandVersion +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/version_test.go b/vendor/github.com/openshift/library-go/pkg/operator/status/version_test.go new file mode 100644 index 00000000000..f93b2bec4c8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/version_test.go @@ -0,0 +1,35 @@ +package status + +import ( + "reflect" + "testing" + "time" +) + +func TestVersionGetterBasic(t *testing.T) { + versionGetter := NewVersionGetter() + versions := versionGetter.GetVersions() + if versions == nil { + t.Fatal(versions) + } + + ch := versionGetter.VersionChangedChannel() + if ch == nil { + t.Fatal(ch) + } + + versionGetter.SetVersion("foo", "bar") + + select { + case <-ch: + actual := versionGetter.GetVersions() + expected := map[string]string{"foo": "bar"} + if !reflect.DeepEqual(expected, actual) { + t.Fatal(actual) + } + + case <-time.After(5 * time.Second): + t.Fatal("missing") + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go new file mode 100644 index 00000000000..400983b6ea6 --- /dev/null +++
b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go
@@ -0,0 +1,195 @@
+package unsupportedconfigoverridescontroller
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/wait"
+	kyaml "k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/management"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+const (
+	controllerWorkQueueKey = "key"
+)
+
+// UnsupportedConfigOverridesController maintains an Upgradeable condition on the operator:
+// the condition is True while spec.unsupportedConfigOverrides is empty, and False (listing
+// the overridden keys) once any overrides are set.
+type UnsupportedConfigOverridesController struct {
+	operatorClient v1helpers.OperatorClient
+
+	cachesToSync []cache.InformerSynced
+	queue        workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+}
+
+// NewUnsupportedConfigOverridesController creates UnsupportedConfigOverridesController.
+func NewUnsupportedConfigOverridesController(
+	operatorClient v1helpers.OperatorClient,
+	eventRecorder events.Recorder,
+) *UnsupportedConfigOverridesController {
+	c := &UnsupportedConfigOverridesController{
+		operatorClient: operatorClient,
+
+		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "UnsupportedConfigOverridesController"),
+		eventRecorder: eventRecorder.WithComponentSuffix("unsupported-config-overrides-controller"),
+	}
+
+	operatorClient.Informer().AddEventHandler(c.eventHandler())
+
+	c.cachesToSync = append(c.cachesToSync, operatorClient.Informer().HasSynced)
+
+	return c
+}
+
+func (c *UnsupportedConfigOverridesController) sync() error {
+	operatorSpec, _, _, err := c.operatorClient.GetOperatorState()
+	if err != nil {
+		return err
+	}
+
+	if !management.IsOperatorManaged(operatorSpec.ManagementState) {
+		return nil
+	}
+
+	cond := operatorv1.OperatorCondition{
+		Type:   condition.UnsupportedConfigOverridesUpgradeableConditionType,
+		Status: operatorv1.ConditionTrue,
+		Reason: "NoUnsupportedConfigOverrides",
+	}
+	if len(operatorSpec.UnsupportedConfigOverrides.Raw) > 0 {
+		cond.Status = operatorv1.ConditionFalse
+		cond.Reason = "UnsupportedConfigOverridesSet"
+		cond.Message = fmt.Sprintf("unsupportedConfigOverrides=%v", string(operatorSpec.UnsupportedConfigOverrides.Raw))
+
+		// try to get a prettier message
+		keys, err := keysSetInUnsupportedConfig(operatorSpec.UnsupportedConfigOverrides.Raw)
+		if err == nil {
+			cond.Message = fmt.Sprintf("setting: %v", keys.List())
+		}
+	}
+
+	if _, _, updateError := v1helpers.UpdateStatus(c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil {
+		return updateError
+	}
+	return nil
+}
+
+// keysSetInUnsupportedConfig returns the set of dotted key paths set in the given YAML (or JSON) config.
+func keysSetInUnsupportedConfig(configYaml []byte) (sets.String, error) {
+	configJson, err := kyaml.ToJSON(configYaml)
+	if err != nil {
+		klog.Warning(err)
+		// maybe it's just json
+		configJson = configYaml
+	}
+
+	config := map[string]interface{}{}
+	if err := json.NewDecoder(bytes.NewBuffer(configJson)).Decode(&config); err != nil {
+		return nil, err
+	}
+
+	return keysSetInUnsupportedConfigMap([]string{}, config), nil
+}
+
+// keysSetInUnsupportedConfigMap recursively collects the dotted key paths set in a map node.
+func keysSetInUnsupportedConfigMap(pathSoFar []string, config map[string]interface{}) sets.String {
+	ret := sets.String{}
+
+	for k, v := range config {
+		currPath := append(pathSoFar, k)
+
+		switch castV := v.(type) {
+		case map[string]interface{}:
+			ret.Insert(keysSetInUnsupportedConfigMap(currPath, castV).UnsortedList()...)
+		case []interface{}:
+			ret.Insert(keysSetInUnsupportedConfigSlice(currPath, castV).UnsortedList()...)
+		default:
+			ret.Insert(strings.Join(currPath, "."))
+		}
+	}
+
+	return ret
+}
+
+// keysSetInUnsupportedConfigSlice recursively collects the dotted key paths set in a slice node,
+// using the element index as the path segment.
+func keysSetInUnsupportedConfigSlice(pathSoFar []string, config []interface{}) sets.String {
+	ret := sets.String{}
+
+	for index, v := range config {
+		currPath := append(pathSoFar, fmt.Sprintf("%d", index))
+
+		switch castV := v.(type) {
+		case map[string]interface{}:
+			ret.Insert(keysSetInUnsupportedConfigMap(currPath, castV).UnsortedList()...)
+		case []interface{}:
+			ret.Insert(keysSetInUnsupportedConfigSlice(currPath, castV).UnsortedList()...)
+		default:
+			ret.Insert(strings.Join(currPath, "."))
+		}
+	}
+
+	return ret
+}
+
+func (c *UnsupportedConfigOverridesController) Run(ctx context.Context, workers int) {
+	defer utilruntime.HandleCrash()
+	defer c.queue.ShutDown()
+
+	klog.Infof("Starting UnsupportedConfigOverridesController")
+	defer klog.Infof("Shutting down UnsupportedConfigOverridesController")
+	if !cache.WaitForCacheSync(ctx.Done(), c.cachesToSync...) {
+		return
+	}
+
+	// doesn't matter what workers say, only start one.
+	go wait.UntilWithContext(ctx, c.runWorker, time.Second)
+
+	<-ctx.Done()
+}
+
+func (c *UnsupportedConfigOverridesController) runWorker(ctx context.Context) {
+	for c.processNextWorkItem() {
+	}
+}
+
+func (c *UnsupportedConfigOverridesController) processNextWorkItem() bool {
+	dsKey, quit := c.queue.Get()
+	if quit {
+		return false
+	}
+	defer c.queue.Done(dsKey)
+
+	err := c.sync()
+	if err == nil {
+		c.queue.Forget(dsKey)
+		return true
+	}
+
+	utilruntime.HandleError(fmt.Errorf("%v failed with: %v", dsKey, err))
+	c.queue.AddRateLimited(dsKey)
+
+	return true
+}
+
+// eventHandler queues the operator to check spec and status
+func (c *UnsupportedConfigOverridesController) eventHandler() cache.ResourceEventHandler {
+	return cache.ResourceEventHandlerFuncs{
+		AddFunc:    func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) },
+		UpdateFunc: func(old, new interface{}) { c.queue.Add(controllerWorkQueueKey) },
+		DeleteFunc: func(obj interface{}) { c.queue.Add(controllerWorkQueueKey) },
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller_test.go
new file mode 100644
index 00000000000..87e4e3d2353
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller_test.go
@@ -0,0 +1,86 @@
+package unsupportedconfigoverridescontroller
+
+import (
+	"testing"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+func TestKeysSetInUnsupportedConfig(t *testing.T) {
+	tests := []struct {
+		name string
+
+		yaml     string
+		expected sets.String
+	}{
+		{
+			name:     "empty",
+			yaml:     "",
+			expected: sets.NewString(),
+		},
+		{
+			name: "nested maps",
+			yaml: `
+apple:
+  banana:
+    carrot: hammer
+`,
+			expected: sets.NewString(
+				"apple.banana.carrot",
+			),
+		},
+		{
+			name: "multiple nested maps",
+			yaml: `
+apple:
+  banana:
+    carrot: hammer
+  blueberry:
+    cabbage: saw
+artichoke: plane +`, + expected: sets.NewString( + "apple.banana.carrot", + "apple.blueberry.cabbage", + "artichoke", + ), + }, + { + name: "multiple nested slices with nested maps", + yaml: ` +apple: + banana: + carrot: + - hammer + - chisel + - drawknife + blueberry: + - saw: + chives: + dill: square +artichoke: plane +`, + expected: sets.NewString( + "artichoke", + "apple.banana.carrot.0", + "apple.banana.carrot.1", + "apple.banana.carrot.2", + "apple.blueberry.0.chives.dill", + "apple.blueberry.0.saw", + ), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := keysSetInUnsupportedConfig([]byte(test.yaml)) + if err != nil { + t.Fatal(err) + } + + if !actual.Equal(test.expected) { + t.Fatalf("missing expected %v, extra actual %v", test.expected.Difference(actual).List(), actual.Difference(test.expected).List()) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go new file mode 100644 index 00000000000..0038bc55907 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go @@ -0,0 +1,102 @@ +package v1helpers + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +type combinedConfigMapGetter struct { + client corev1client.ConfigMapsGetter + listers KubeInformersForNamespaces +} + +func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter { + return &combinedConfigMapGetter{ + client: client, + listers: listers, + } +} + +type combinedConfigMapInterface struct { + corev1client.ConfigMapInterface + lister corev1listers.ConfigMapNamespaceLister + namespace string +} + +func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface { + return combinedConfigMapInterface{ + ConfigMapInterface: g.client.ConfigMaps(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace), + namespace: namespace, + } +} + +func (g combinedConfigMapInterface) Get(name string, options metav1.GetOptions) (*corev1.ConfigMap, error) { + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} +func (g combinedConfigMapInterface) List(opts metav1.ListOptions) (*corev1.ConfigMapList, error) { + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.ConfigMapList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} + +type combinedSecretGetter struct { + client corev1client.SecretsGetter + listers KubeInformersForNamespaces +} + +func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter { + return &combinedSecretGetter{ + client: client, + listers: listers, + } +} + +type combinedSecretInterface struct { + corev1client.SecretInterface + lister corev1listers.SecretNamespaceLister + namespace string +} + +func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface { + return combinedSecretInterface{ + SecretInterface: g.client.Secrets(namespace), + lister: 
g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace), + namespace: namespace, + } +} + +func (g combinedSecretInterface) Get(name string, options metav1.GetOptions) (*corev1.Secret, error) { + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} + +func (g combinedSecretInterface) List(opts metav1.ListOptions) (*corev1.SecretList, error) { + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.SecretList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go new file mode 100644 index 00000000000..8933328978b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go @@ -0,0 +1,7 @@ +package v1helpers + +import "k8s.io/client-go/informers" + +func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces { + return kubeInformersForNamespaces(informers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helper_test.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helper_test.go new file mode 100644 index 00000000000..018d54c2fd8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helper_test.go @@ -0,0 +1,151 @@ +package v1helpers + +import ( + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + + operatorsv1 "github.com/openshift/api/operator/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/diff" +) + +func newCondition(name, status, reason, message string, lastTransition *metav1.Time) operatorsv1.OperatorCondition { + ret := operatorsv1.OperatorCondition{ + Type: name, + Status: operatorsv1.ConditionStatus(status), + Reason: reason, + Message: message, + } + if lastTransition != nil { + ret.LastTransitionTime = *lastTransition + } + + return ret +} + +func TestSetOperatorCondition(t *testing.T) { + nowish := metav1.Now() + beforeish := metav1.Time{Time: nowish.Add(-10 * time.Second)} + afterish := metav1.Time{Time: nowish.Add(10 * time.Second)} + + tests := []struct { + name string + starting []operatorsv1.OperatorCondition + newCondition operatorsv1.OperatorCondition + expected []operatorsv1.OperatorCondition + }{ + { + name: "add to empty", + starting: []operatorsv1.OperatorCondition{}, + newCondition: newCondition("one", "True", "my-reason", "my-message", nil), + expected: []operatorsv1.OperatorCondition{ + newCondition("one", "True", "my-reason", "my-message", nil), + }, + }, + { + name: "add to non-conflicting", + starting: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + }, + newCondition: newCondition("one", "True", "my-reason", "my-message", nil), + expected: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + newCondition("one", "True", "my-reason", "my-message", nil), + }, + }, + { + name: "change existing status", + starting: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + newCondition("one", "True", "my-reason", "my-message", nil), + }, + newCondition: newCondition("one", "False", "my-different-reason", "my-othermessage", nil), + 
expected: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + newCondition("one", "False", "my-different-reason", "my-othermessage", nil), + }, + }, + { + name: "leave existing transition time", + starting: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + newCondition("one", "True", "my-reason", "my-message", &beforeish), + }, + newCondition: newCondition("one", "True", "my-reason", "my-message", &afterish), + expected: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + newCondition("one", "True", "my-reason", "my-message", &beforeish), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + SetOperatorCondition(&test.starting, test.newCondition) + if len(test.starting) != len(test.expected) { + t.Fatal(spew.Sdump(test.starting)) + } + + for i := range test.expected { + expected := test.expected[i] + actual := test.starting[i] + if expected.LastTransitionTime == (metav1.Time{}) { + actual.LastTransitionTime = metav1.Time{} + } + if !equality.Semantic.DeepEqual(expected, actual) { + t.Errorf(diff.ObjectDiff(expected, actual)) + } + } + }) + } +} + +func TestRemoveOperatorCondition(t *testing.T) { + tests := []struct { + name string + starting []operatorsv1.OperatorCondition + removeCondition string + expected []operatorsv1.OperatorCondition + }{ + { + name: "remove missing", + starting: []operatorsv1.OperatorCondition{}, + removeCondition: "one", + expected: []operatorsv1.OperatorCondition{}, + }, + { + name: "remove existing", + starting: []operatorsv1.OperatorCondition{ + newCondition("two", "True", "my-reason", "my-message", nil), + newCondition("one", "True", "my-reason", "my-message", nil), + }, + removeCondition: "two", + expected: []operatorsv1.OperatorCondition{ + newCondition("one", "True", "my-reason", "my-message", nil), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + RemoveOperatorCondition(&test.starting, test.removeCondition) + if len(test.starting) != len(test.expected) { + t.Fatal(spew.Sdump(test.starting)) + } + + for i := range test.expected { + expected := test.expected[i] + actual := test.starting[i] + if expected.LastTransitionTime == (metav1.Time{}) { + actual.LastTransitionTime = metav1.Time{} + } + if !equality.Semantic.DeepEqual(expected, actual) { + t.Errorf(diff.ObjectDiff(expected, actual)) + } + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go new file mode 100644 index 00000000000..65fb3e93091 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -0,0 +1,281 @@ +package v1helpers + +import ( + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/retry" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SetOperandVersion sets the new version and returns the previous value. 
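+//
+// A minimal usage sketch (the operand name and versions are illustrative, not from this repo):
+//
+//   versions := []configv1.OperandVersion{{Name: "operand", Version: "4.2.0"}}
+//   previous := SetOperandVersion(&versions, configv1.OperandVersion{Name: "operand", Version: "4.3.0"})
+//   // previous == "4.2.0"; versions now records 4.3.0 for "operand"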
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string {
+	if versions == nil {
+		versions = &[]configv1.OperandVersion{}
+	}
+	existingVersion := FindOperandVersion(*versions, operandVersion.Name)
+	if existingVersion == nil {
+		*versions = append(*versions, operandVersion)
+		return ""
+	}
+
+	previous := existingVersion.Version
+	existingVersion.Version = operandVersion.Version
+	return previous
+}
+
+func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion {
+	if versions == nil {
+		return nil
+	}
+	for i := range versions {
+		if versions[i].Name == name {
+			return &versions[i]
+		}
+	}
+	return nil
+}
+
+func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	existingCondition := FindOperatorCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	newConditions := []operatorv1.OperatorCondition{}
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue)
+}
+
+func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse)
+}
+
+func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			return condition.Status == status
+		}
+	}
+	return false
+}
+
+// UpdateOperatorSpecFunc is a func that mutates an operator spec.
+type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error
+
+// UpdateSpec applies the update funcs to the oldSpec and tries to update via the client.
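+//
+// A hedged sketch of a typical call site (the observed-config map here is hypothetical):
+//
+//   _, updated, err := UpdateSpec(operatorClient,
+//       UpdateObservedConfigFn(map[string]interface{}{"logLevel": "Debug"}))
+//   // updated reports whether a write actually happened; on conflict the funcs are retried.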
+func UpdateSpec(client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
+	updated := false
+	var operatorSpec *operatorv1.OperatorSpec
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		oldSpec, _, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newSpec := oldSpec.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newSpec); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldSpec, newSpec) {
+			return nil
+		}
+
+		operatorSpec, _, err = client.UpdateOperatorSpec(resourceVersion, newSpec)
+		updated = err == nil
+		return err
+	})
+
+	return operatorSpec, updated, err
+}
+
+// UpdateObservedConfigFn returns a func to update the observed config.
+func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
+	return func(oldSpec *operatorv1.OperatorSpec) error {
+		oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
+		return nil
+	}
+}
+
+// UpdateStatusFunc is a func that mutates an operator status.
+type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
+
+// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.OperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateOperatorStatus(resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateConditionFn returns a func to update a condition.
+func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
+	return func(oldStatus *operatorv1.OperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
+type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
+
+// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStaticPodStatus(client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+ updatedOperatorStatus = newStatus + return nil + } + + updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(resourceVersion, newStatus) + updated = err == nil + return err + }) + + return updatedOperatorStatus, updated, err +} + +// UpdateStaticPodConditionFn returns a func to update a condition. +func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc { + return func(oldStatus *operatorv1.StaticPodOperatorStatus) error { + SetOperatorCondition(&oldStatus.Conditions, cond) + return nil + } +} + +type aggregate []error + +var _ utilerrors.Aggregate = aggregate{} + +// NewMultiLineAggregate returns an aggregate error with multi-line output +func NewMultiLineAggregate(errList []error) error { + var errs []error + for _, e := range errList { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// Error is part of the error interface. +func (agg aggregate) Error() string { + msgs := make([]string, len(agg)) + for i := range agg { + msgs[i] = agg[i].Error() + } + return strings.Join(msgs, "\n") +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s +func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar { + if mapEnvVars == nil { + return nil + } + + envVars := make([]corev1.EnvVar, len(mapEnvVars)) + i := 0 + for k, v := range mapEnvVars { + envVars[i] = corev1.EnvVar{Name: k, Value: v} + i++ + } + + // need to sort the slice so that kube-controller-manager-pod configmap does not change all the time + sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name }) + return envVars +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go new file mode 100644 index 00000000000..8a3636b334b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -0,0 +1,105 @@ +package v1helpers + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power +type KubeInformersForNamespaces interface { + Start(stopCh <-chan struct{}) + InformersFor(namespace string) informers.SharedInformerFactory + Namespaces() sets.String + + ConfigMapLister() corev1listers.ConfigMapLister + SecretLister() corev1listers.SecretLister +} + +func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { + ret := kubeInformersForNamespaces{} + for _, namespace := range namespaces { + if len(namespace) == 0 { + ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + continue + } + ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace)) + } + + return ret +} + +type kubeInformersForNamespaces map[string]informers.SharedInformerFactory + +func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) { + for _, informer := range i { + informer.Start(stopCh) + } +} + +func (i kubeInformersForNamespaces) Namespaces() sets.String { + return 
sets.StringKeySet(i) +} +func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory { + return i[namespace] +} + +func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool { + return i.InformersFor(namespace) != nil +} + +type configMapLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister { + return configMapLister(i) +} + +func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().ConfigMaps().Lister().List(selector) +} + +func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace) +} + +type secretLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister { + return secretLister(i) +} + +func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().Secrets().Lister().List(selector) +} + +func (l secretLister) Secrets(namespace string) corev1listers.SecretNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().Secrets().Lister().Secrets(namespace) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go new file mode 100644 index 00000000000..4afb23a6121 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -0,0 +1,30 @@ +package v1helpers + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + "k8s.io/client-go/tools/cache" +) + +type OperatorClient interface { + Informer() cache.SharedIndexInformer + // GetOperatorState returns the operator spec, status and the resource version, potentially from a lister. + GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error) + // UpdateOperatorSpec updates the spec of the operator, assuming the given resource version. + UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error) + // UpdateOperatorStatus updates the status of the operator, assuming the given resource version. + UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error) +} + +type StaticPodOperatorClient interface { + OperatorClient + // GetStaticPodOperatorState returns the static pod operator spec, status and the resource version, + // potentially from a lister. + GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) + // GetStaticPodOperatorStateWithQuorum return the static pod operator spec, status and resource version + // directly from a server read. 
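+	// Use this instead of GetStaticPodOperatorState when acting on stale cached data would be
+	// harmful, e.g. immediately before a write; it costs a live API round-trip.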
+ GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) + // UpdateStaticPodOperatorStatus updates the status, assuming the given resource version. + UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error) + // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version. + UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go new file mode 100644 index 00000000000..dc336276e04 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -0,0 +1,230 @@ +package v1helpers + +import ( + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests. +func NewFakeSharedIndexInformer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +type fakeSharedIndexInformer struct{} + +func (fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) { +} + +func (fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) { +} + +func (fakeSharedIndexInformer) GetStore() cache.Store { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetController() cache.Controller { + panic("implement me") +} + +func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) { + panic("implement me") +} + +func (fakeSharedIndexInformer) HasSynced() bool { + panic("implement me") +} + +func (fakeSharedIndexInformer) LastSyncResourceVersion() string { + panic("implement me") +} + +func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetIndexer() cache.Indexer { + panic("implement me") +} + +// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
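+//
+// A minimal test-wiring sketch (all values illustrative):
+//
+//   client := NewFakeStaticPodOperatorClient(
+//       &operatorv1.StaticPodOperatorSpec{OperatorSpec: operatorv1.OperatorSpec{ManagementState: operatorv1.Managed}},
+//       &operatorv1.StaticPodOperatorStatus{},
+//       nil, // no injected status-update error
+//       nil, // no injected spec-update error
+//   )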
+func NewFakeStaticPodOperatorClient( + staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + return &fakeStaticPodOperatorClient{ + fakeStaticPodOperatorSpec: staticPodSpec, + fakeStaticPodOperatorStatus: staticPodStatus, + resourceVersion: "0", + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, + } +} + +type fakeStaticPodOperatorClient struct { + fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec + fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error +} + +func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus = status + return c.fakeStaticPodOperatorStatus, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, c.resourceVersion, nil +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err 
error) { + panic("not supported") +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + staticPodStatus := c.fakeStaticPodOperatorStatus.DeepCopy() + staticPodStatus.OperatorStatus = *status + if err := c.triggerStatusUpdateError(resourceVersion, staticPodStatus); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus.OperatorStatus = *status + return &c.fakeStaticPodOperatorStatus.OperatorStatus, nil +} + +// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit test +func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister { + return &fakeNodeLister{client: client} +} + +type fakeNodeLister struct { + client kubernetes.Interface +} + +func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) { + nodes, err := n.client.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + ret := []*corev1.Node{} + for i := range nodes.Items { + ret = append(ret, &nodes.Items[i]) + } + return ret, nil +} + +func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { + panic("implement me") +} + +func (n *fakeNodeLister) ListWithPredicate(predicate corev1listers.NodeConditionPredicate) ([]*corev1.Node, error) { + panic("implement me") +} + +// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
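+//
+// For example (a sketch; the trigger func may be nil when no error injection is needed):
+//
+//   client := NewFakeOperatorClient(&operatorv1.OperatorSpec{}, &operatorv1.OperatorStatus{}, nil)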
+func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClient { + return &fakeOperatorClient{ + fakeOperatorSpec: spec, + fakeOperatorStatus: status, + resourceVersion: "0", + triggerStatusUpdateError: triggerErr, + } +} + +type fakeOperatorClient struct { + fakeOperatorSpec *operatorv1.OperatorSpec + fakeOperatorStatus *operatorv1.OperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error +} + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) UpdateOperatorStatus(resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeOperatorStatus = status + return c.fakeOperatorStatus, nil +} +func (c *fakeOperatorClient) UpdateOperatorSpec(string, *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go b/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go new file mode 100644 index 00000000000..40d615583c7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare.go @@ -0,0 +1,67 @@ +package versioning + +import ( + "github.com/blang/semver" +) + +type VersionRange interface { + Between(needle *semver.Version) bool + BetweenOrEmpty(needle *semver.Version) bool +} + +type versionRange struct { + lowerInclusive bool + lower semver.Version + + upperInclusive bool + upper semver.Version +} + +// NewRange is the "normal" [1.1.0, 1.2) +func NewRange(lowerInclusive, upperExclusive string) (VersionRange, error) { + lower, err := semver.Parse(lowerInclusive) + if err != nil { + return nil, err + } + upper, err := semver.Parse(upperExclusive) + if err != nil { + return nil, err + } + + return &versionRange{ + lowerInclusive: true, + lower: lower, + upper: upper, + }, nil +} + +func NewRangeOrDie(lowerInclusive, upperExclusive string) VersionRange { + ret, err := NewRange(lowerInclusive, upperExclusive) + if err != nil { + panic(err) + } + return ret +} + +func (r versionRange) Between(needle *semver.Version) bool { + switch { + case r.lowerInclusive && !r.upperInclusive: + return needle.GTE(r.lower) && needle.LT(r.upper) + case r.lowerInclusive && r.upperInclusive: + return needle.GTE(r.lower) && needle.LTE(r.upper) + case !r.lowerInclusive && !r.upperInclusive: + return needle.GT(r.lower) && needle.LT(r.upper) + case !r.lowerInclusive && r.upperInclusive: + return needle.GT(r.lower) && needle.LTE(r.upper) + + } + + panic("math broke") +} + +func (r versionRange) BetweenOrEmpty(needle *semver.Version) bool { + if needle 
== nil {
+		return true
+	}
+	return r.Between(needle)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare_test.go b/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare_test.go
new file mode 100644
index 00000000000..c06bf32629c
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/versioning/compare_test.go
@@ -0,0 +1,51 @@
+package versioning
+
+import (
+	"testing"
+
+	"github.com/blang/semver"
+)
+
+func TestBetween(t *testing.T) {
+	tests := []struct {
+		name         string
+		versionRange VersionRange
+		needle       semver.Version
+
+		expected bool
+	}{
+		{
+			name:         "over",
+			versionRange: NewRangeOrDie("1.1.0", "1.2.0"),
+			needle:       semver.MustParse("1.2.0"),
+			expected:     false,
+		},
+		{
+			name:         "under",
+			versionRange: NewRangeOrDie("1.1.0", "1.2.0"),
+			needle:       semver.MustParse("1.0.10"),
+			expected:     false,
+		},
+		{
+			name:         "boundary",
+			versionRange: NewRangeOrDie("1.1.0", "1.2.0"),
+			needle:       semver.MustParse("1.1.0"),
+			expected:     true,
+		},
+		{
+			name:         "in",
+			versionRange: NewRangeOrDie("1.1.0", "1.2.0"),
+			needle:       semver.MustParse("1.1.1"),
+			expected:     true,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			actual := test.versionRange.Between(&test.needle)
+			if test.expected != actual {
+				t.Errorf("expected %v, got %v", test.expected, actual)
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go
new file mode 100644
index 00000000000..4c2597e4656
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go
@@ -0,0 +1,391 @@
+package watchdog
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apiserver/pkg/server"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
+
+	"github.com/openshift/library-go/pkg/config/client"
+	"github.com/openshift/library-go/pkg/controller/fileobserver"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/retry"
+)
+
+type FileWatcherOptions struct {
+	// ProcessName is the name of the process to look for in /proc if non-empty,
+	// identifying the process to send SIGTERM to.
+	ProcessName string
+	// PidFile contains the pid of the process to send SIGTERM to. Can be empty.
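+	// Only the first line of the file is read, and it must parse as a decimal PID.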
+	PidFile string
+
+	// Files lists all files we want to monitor for changes
+	Files      []string
+	KubeConfig string
+
+	// Namespace to report events to
+	Namespace string
+	recorder  events.Recorder
+
+	// Interval specifies how aggressive we want to be in file checks
+	Interval time.Duration
+
+	// Time to give the process to terminate gracefully
+	TerminationGracePeriod time.Duration
+
+	// ReadyFile is touched when the watched files have been initially read
+	ReadyFile string
+
+	// for unit-test to mock getting the process PID
+	findPidByNameFn func(name string) (int, bool, error)
+
+	// processExistsFn to mock checking the process PID (unit-test)
+	processExistsFn func(int) (bool, error)
+
+	// for unit-test to mock sending UNIX signals
+	handleTerminationFn func(pid int) error
+
+	handleKillFn func(pid int) error
+
+	// for unit-test to mock prefixing files (/proc/PID/root)
+	addProcPrefixToFilesFn func([]string, int) []string
+
+	// lastTerminatedPid is used to track the value of a PID that we already terminated
+	lastTerminatedPid int
+}
+
+func NewFileWatcherOptions() *FileWatcherOptions {
+	return &FileWatcherOptions{
+		findPidByNameFn:        FindProcessByName,
+		processExistsFn:        ProcessExists,
+		addProcPrefixToFilesFn: addProcPrefixToFiles,
+		handleTerminationFn: func(pid int) error {
+			return syscall.Kill(pid, syscall.SIGTERM)
+		},
+		handleKillFn: func(pid int) error {
+			return syscall.Kill(pid, syscall.SIGKILL)
+		},
+	}
+}
+
+// NewFileWatcherWatchdog returns the file watcher watchdog command.
+// This command should be used as a side-car to a container which will react to file changes in the main container
+// and terminate the main container process in case a change is observed.
+// TODO: If the main container starts before the watchdog side-car container (image pull), there might be a case
+// where the watchdog won't react to a changed file (simply because it is not running yet). In that case the main
+// process will not be reloaded. However, the operator image should be pulled on the master node, and therefore the
+// chances of hitting this case are minimal.
+func NewFileWatcherWatchdog() *cobra.Command {
+	o := NewFileWatcherOptions()
+
+	cmd := &cobra.Command{
+		Use:   "file-watcher-watchdog",
+		Short: "Watch files on the disk and terminate the specified process on change",
+		Run: func(cmd *cobra.Command, args []string) {
+			klog.V(1).Info(cmd.Flags())
+			klog.V(1).Info(spew.Sdump(o))
+
+			// Handle shutdown
+			termHandler := server.SetupSignalHandler()
+			ctx, shutdown := context.WithCancel(context.TODO())
+			go func() {
+				defer shutdown()
+				<-termHandler
+			}()
+
+			if err := o.Complete(); err != nil {
+				klog.Fatal(err)
+			}
+			if err := o.Validate(); err != nil {
+				klog.Fatal(err)
+			}
+
+			if err := o.Run(ctx); err != nil {
+				klog.Fatal(err)
+			}
+		},
+	}
+
+	o.AddFlags(cmd.Flags())
+
+	return cmd
+}
+
+func (o *FileWatcherOptions) AddFlags(fs *pflag.FlagSet) {
+	fs.StringVar(&o.ProcessName, "process-name", "", "base name of the binary to send the TERM signal to on file change (eg. 'hyperkube').")
+	fs.StringVar(&o.PidFile, "pid-file", "", "file with the pid to send the TERM signal to on file change.")
+	fs.StringSliceVar(&o.Files, "files", o.Files, "comma separated list of file names to monitor for changes")
+	fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty")
+	fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to report the watchdog events")
+	fs.DurationVar(&o.Interval, "interval", 5*time.Second, "interval specifying how aggressive the file checks should be")
+	fs.DurationVar(&o.TerminationGracePeriod, "termination-grace-period", 30*time.Second, "interval specifying how long to wait until sending KILL signal to the process")
+	fs.StringVar(&o.ReadyFile, "ready-file", o.ReadyFile, "this file is touched when the watched files have been read initially (to avoid race between watchee and watcher)")
+}
+
+func (o *FileWatcherOptions) Complete() error {
+	clientConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfig, nil)
+	if err != nil {
+		return err
+	}
+	kubeClient, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second)
+	defer cancel()
+
+	// Get event recorder.
+	// Retry on connection errors for 10s, but don't error out; instead fall back to the namespace.
+	var eventTarget *v1.ObjectReference
+	err = retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) {
+		var clientErr error
+		eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(kubeClient, o.Namespace, nil)
+		if clientErr != nil {
+			return false, clientErr
+		}
+		return true, nil
+	})
+	if err != nil {
+		klog.Warningf("unable to get owner reference (falling back to namespace): %v", err)
+	}
+	o.recorder = events.NewRecorder(kubeClient.CoreV1().Events(o.Namespace), "file-change-watchdog", eventTarget)
+
+	return nil
+}
+
+func (o *FileWatcherOptions) Validate() error {
+	if len(o.ProcessName) == 0 && len(o.PidFile) == 0 {
+		return fmt.Errorf("process name or pid file must be specified")
+	}
+	if len(o.Files) == 0 {
+		return fmt.Errorf("at least one file to observe must be specified")
+	}
+	if len(o.Namespace) == 0 && len(os.Getenv("POD_NAMESPACE")) == 0 {
+		return fmt.Errorf("either namespace flag or POD_NAMESPACE environment variable must be specified")
+	}
+	return nil
+}
+
+// runPidObserver runs a loop that observes changes to the PID of the process we send signals to
+// after a change is detected.
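+// It closes pidObservedCh when it returns; runWatchdog uses the first value sent on the channel
+// to build the /proc/PID/root file paths, and a later PID change ends the loop.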
+func (o *FileWatcherOptions) runPidObserver(ctx context.Context, pidObservedCh chan int) {
+	defer close(pidObservedCh)
+	currentPID := 0
+	retries := 0
+	pollErr := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) {
+		retries++
+		observedPID := -1
+		if len(o.ProcessName) > 0 {
+			// attempt to find the PID by process name via /proc
+			pid, found, err := o.findPidByNameFn(o.ProcessName)
+			if !found || err != nil {
+				klog.Warningf("Unable to determine PID for %q (retry: %d, err: %v)", o.ProcessName, retries, err)
+			} else {
+				observedPID = pid
+			}
+		}
+		if len(o.PidFile) > 0 {
+			// attempt to find the PID by pid file
+			bs, err := ioutil.ReadFile(o.PidFile)
+			if err != nil {
+				klog.Warningf("Unable to read pid file %s: %v", o.PidFile, err)
+			} else {
+				lines := strings.SplitN(string(bs), "\n", 2)
+				i, err := strconv.Atoi(lines[0])
+				if err != nil {
+					klog.Warningf("Unable to parse pid file %s: %v", o.PidFile, err)
+				} else {
+					observedPID = i
+				}
+			}
+		}
+		if observedPID < 0 {
+			return false, nil
+		}
+
+		if currentPID == 0 {
+			currentPID = observedPID
+			// notify runWatchdog when the PID is initially observed (we need the PID to mutate file paths).
+			pidObservedCh <- observedPID
+		}
+
+		// watch for PID changes; when one is observed, restart the observer and wait for the new PID to appear.
+		if currentPID != observedPID {
+			return true, nil
+		}
+
+		return false, nil
+	}, ctx.Done())
+
+	// These are not fatal errors, but we still want to log them
+	if pollErr != nil && pollErr != wait.ErrWaitTimeout {
+		klog.Warningf("Unexpected error: %v", pollErr)
+	}
+}
+
+// readInitialFileContent reads the content of the specified files.
+// This is needed by the file observer.
+func readInitialFileContent(files []string) (map[string][]byte, error) {
+	initialContent := map[string][]byte{}
+	for _, name := range files {
+		// skip files that do not exist (yet)
+		if _, err := os.Stat(name); os.IsNotExist(err) {
+			continue
+		}
+		content, err := ioutil.ReadFile(name)
+		if err != nil {
+			return nil, err
+		}
+		initialContent[name] = content
+	}
+	return initialContent, nil
+}
+
+// addProcPrefixToFiles returns the file list with every file prefixed by /proc/PID/root.
+// With a shared PID namespace, we are able to access the target container filesystem via /proc.
+func addProcPrefixToFiles(oldFiles []string, pid int) []string {
+	files := []string{}
+	for _, file := range oldFiles {
+		files = append(files, filepath.Join("/proc", fmt.Sprintf("%d", pid), "root", file))
+	}
+	return files
+}
+
+// Run runs the main watchdog loop.
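+// Each runWatchdog invocation exits after it has signalled one process; the loop then starts a
+// fresh instance that waits for the replacement PID to appear.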
+func (o *FileWatcherOptions) Run(ctx context.Context) error {
+	for {
+		{
+			o.lastTerminatedPid = 0
+			instanceCtx, shutdown := context.WithCancel(ctx)
+			defer shutdown()
+			select {
+			case <-ctx.Done():
+				// exit(0)
+				shutdown()
+				return nil
+			default:
+			}
+			if err := o.runWatchdog(instanceCtx); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+func (o *FileWatcherOptions) terminateGracefully(pid int) error {
+	// Send SIGTERM to the process
+	klog.Infof("Sending TERM signal to %d ...", pid)
+	if err := o.handleTerminationFn(pid); err != nil {
+		return err
+	}
+	// Wait up to TerminationGracePeriod or until the process disappears from /proc, whichever comes first
+	pollErr := wait.PollImmediate(500*time.Millisecond, o.TerminationGracePeriod, func() (done bool, err error) {
+		if exists, err := o.processExistsFn(pid); !exists && err == nil {
+			return true, nil
+		} else if err != nil {
+			return true, err
+		}
+		return false, nil
+	})
+	// If the process still exists after TerminationGracePeriod has passed, send the KILL signal and return
+	if pollErr == wait.ErrWaitTimeout {
+		klog.Infof("Sending KILL signal to %d ...", pid)
+		return o.handleKillFn(pid)
+	}
+	return pollErr
+}
+
+// runWatchdog runs a single instance of the watchdog.
+func (o *FileWatcherOptions) runWatchdog(ctx context.Context) error {
+	watchdogCtx, shutdown := context.WithCancel(ctx)
+	defer shutdown()
+
+	// Handle watchdog shutdown
+	go func() {
+		defer shutdown()
+		<-ctx.Done()
+	}()
+
+	pidObservedCh := make(chan int)
+	go o.runPidObserver(watchdogCtx, pidObservedCh)
+
+	// Wait until we get the initial PID for the process
+	if len(o.ProcessName) > 0 && len(o.PidFile) > 0 {
+		klog.Infof("Waiting for process with name %q or PID file %q...", o.ProcessName, o.PidFile)
+	} else if len(o.ProcessName) > 0 {
+		klog.Infof("Waiting for process with process name %q ...", o.ProcessName)
+	} else if len(o.PidFile) > 0 {
+		klog.Infof("Waiting for process PID file %q ...", o.PidFile)
+	}
+	currentPID := <-pidObservedCh
+
+	// Mutate the paths of the specified files to '/proc/PID/root/...'.
+	// This means the side-car container doesn't have to duplicate the mounts from the main container.
+	// This requires the shared PID namespace feature.
+	filesToWatch := o.addProcPrefixToFilesFn(o.Files, currentPID)
+	klog.Infof("Watching for changes in: %s", spew.Sdump(filesToWatch))
+
+	// Read the initial file content. If the shared PID namespace does not work, this will error.
+	initialContent, err := readInitialFileContent(filesToWatch)
+	if err != nil {
+		// TODO: remove this once we get aggregated logging
+		o.recorder.Warningf("FileChangeWatchdogFailed", "Reading initial file content failed: %v", err)
+		return fmt.Errorf("unable to read initial file content: %v", err)
+	}
+
+	o.recorder.Eventf("FileChangeWatchdogStarted", "Started watching files for process %s[%d]", o.ProcessName, currentPID)
+
+	if len(o.ReadyFile) > 0 {
+		f, err := os.Create(o.ReadyFile)
+		if err != nil {
+			return fmt.Errorf("cannot touch ready file %q: %v", o.ReadyFile, err)
+		}
+		f.Close()
+	}
+
+	observer, err := fileobserver.NewObserver(o.Interval)
+	if err != nil {
+		o.recorder.Warningf("ObserverFailed", "Failed to start file observer: %v", err)
+		return fmt.Errorf("unable to start file observer: %v", err)
+	}
+
+	observer.AddReactor(func(file string, action fileobserver.ActionType) error {
+		// We already signalled this PID to terminate and the process is being gracefully terminated now.
+		// Do not duplicate the termination process for a PID we already terminated, but wait for the new PID to appear.
+ if currentPID == o.lastTerminatedPid { + return nil + } + + o.lastTerminatedPid = currentPID + defer shutdown() + + o.recorder.Eventf("FileChangeObserved", "Observed change in file %q, gracefully terminating process %s[%d]", file, o.ProcessName, currentPID) + + if err := o.terminateGracefully(currentPID); err != nil { + o.recorder.Warningf("SignalFailed", "Failed to terminate process %s[%d] gracefully: %v", o.ProcessName, currentPID, err) + return err + } + + return nil + }, initialContent, filesToWatch...) + + go observer.Run(watchdogCtx.Done()) + + <-watchdogCtx.Done() + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go new file mode 100644 index 00000000000..a3f302570ed --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go @@ -0,0 +1,153 @@ +package watchdog + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" +) + +func TestPidObserver(t *testing.T) { + var currentPIDMutex = sync.Mutex{} + currentPID := 1 + + getProcessPIDByName := func(name string) (int, bool, error) { + currentPIDMutex.Lock() + defer currentPIDMutex.Unlock() + return currentPID, true, nil + } + + watcher := &FileWatcherOptions{ + ProcessName: "foo", + findPidByNameFn: getProcessPIDByName, + } + + pidObservedCh := make(chan int) + monitorTerminated := make(chan struct{}) + + go func() { + defer close(monitorTerminated) + watcher.runPidObserver(context.TODO(), pidObservedCh) + }() + + // We should receive the initial PID + select { + case pid := <-pidObservedCh: + if pid != 1 { + t.Fatalf("expected PID 1, got %d", pid) + } + t.Log("initial PID observed") + case <-time.After(10 * time.Second): + t.Fatal("timeout (observing initial PID)") + } + + // We changed the PID, the monitor should gracefully terminate + currentPIDMutex.Lock() + currentPID = 10 + currentPIDMutex.Unlock() + + select { + case <-monitorTerminated: + t.Log("monitor successfully terminated") + case <-time.After(10 * time.Second): + t.Fatal("timeout (terminating monitor)") + } +} + +func TestWatchdogRun(t *testing.T) { + signalTermRecv := make(chan int) + signalKillRecv := make(chan int) + + // Make temporary file we are going to watch and write changes + testDir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + if err := ioutil.WriteFile(filepath.Join(testDir, "testfile"), []byte("starting"), os.ModePerm); err != nil { + t.Fatal(err) + } + + opts := &FileWatcherOptions{ + ProcessName: "test", + Files: []string{filepath.Join(testDir, "testfile")}, + handleTerminationFn: func(pid int) error { + signalTermRecv <- pid + return nil + }, + handleKillFn: func(pid int) error { + signalKillRecv <- pid + return nil + }, + findPidByNameFn: func(name string) (int, bool, error) { + return 10, true, nil + }, + processExistsFn: func(int) (bool, error) { + return true, nil + }, + addProcPrefixToFilesFn: func(files []string, i int) []string { + return files + }, + Interval: 200 * time.Millisecond, + TerminationGracePeriod: 1 * time.Second, + recorder: eventstesting.NewTestingEventRecorder(t), + } + + // commandCtx is context used for the Run() method + commandCtx, shutdown := context.WithTimeout(context.TODO(), 1*time.Minute) + defer shutdown() + + commandTerminatedCh := make(chan struct{}) + go func() { + defer close(commandTerminatedCh) + if 
err := opts.Run(commandCtx); err != nil { + t.Fatal(err) + } + }() + + // Give the file watcher time to observe the file + time.Sleep(1 * time.Second) + + // Modify the monitored file + if err := ioutil.WriteFile(filepath.Join(testDir, "testfile"), []byte("changed"), os.ModePerm); err != nil { + t.Fatal(err) + } + + select { + case pid := <-signalTermRecv: + if pid != 10 { + t.Errorf("expected received PID to be 10, got %d", pid) + } + case <-time.After(20 * time.Second): + t.Fatal("timeout (waiting for PID)") + } + + select { + case pid := <-signalKillRecv: + if pid != 10 { + t.Errorf("expected received PID to be 10, got %d", pid) + } + case <-time.After(20 * time.Second): + t.Fatal("timeout (waiting for PID)") + } + + select { + case <-commandTerminatedCh: + t.Fatal("run command is not expected to terminate") + default: + } + + // Test the shutdown sequence + shutdown() + select { + case <-commandTerminatedCh: + case <-time.After(20 * time.Second): + t.Fatal("run command failed to terminate") + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go new file mode 100644 index 00000000000..f497b41b9e1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go @@ -0,0 +1,76 @@ +package watchdog + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "syscall" +) + +// FindProcessByName finds the process specified by name and returns the PID of that process. +// If the process is not found, the bool is false. +// NOTE: This requires a container with a shared process namespace (if run as a side-car). +func FindProcessByName(name string) (int, bool, error) { + files, err := ioutil.ReadDir("/proc") + if err != nil { + return 0, false, err + } + // sort so that we start with the numerically named directories + sort.Slice(files, func(i, j int) bool { + return files[i].Name() < files[j].Name() + }) + for _, file := range files { + if !file.IsDir() { + continue + } + // only scan process directories (e.g. /proc/1234) + pid, err := strconv.Atoi(file.Name()) + if err != nil { + continue + } + // read the /proc/123/exe symlink that points to the process binary + linkTarget := readlink(filepath.Join("/proc", file.Name(), "exe")) + if path.Base(linkTarget) != name { + continue + } + return pid, true, nil + } + return 0, false, nil +} + +// ProcessExists checks if the process specified by a PID exists in the /proc filesystem. +// An error is returned when the stat on the /proc dir fails (e.g. a permission issue). +func ProcessExists(pid int) (bool, error) { + procDir, err := os.Stat(fmt.Sprintf("/proc/%d", pid)) + if os.IsNotExist(err) { + return false, nil + } + if err != nil { + return false, err + } + if procDir.IsDir() { + return true, nil + } else { + return false, fmt.Errorf("unexpected error: /proc/%d is a file, not a directory", pid) + } +}
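+ +// Editorial note, not part of the vendored file: a hedged sketch of how a +// side-car might combine the two helpers above. The process name and error +// handling here are illustrative only: +// +// if pid, found, err := FindProcessByName("etcd"); err == nil && found { +// if alive, _ := ProcessExists(pid); alive { +// // the process was located and is still running; safe to signal it +// } +// }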
+// readlink is copied from os.Readlink() but does not return an error when the target path does not exist. +// This is used to read broken links: in the case of a shared PID namespace, /proc/1/exe points to a binary +// that does not exist in the source container. +func readlink(name string) string { + for l := 128; ; l *= 2 { + b := make([]byte, l) + n, _ := syscall.Readlink(name, b) + if n < 0 { + n = 0 + } + if n < l { + return string(b[0:n]) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go new file mode 100644 index 00000000000..47b9e4bbccd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go @@ -0,0 +1,96 @@ +package watchdog + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadLink(t *testing.T) { + tests := []struct { + name string + evalResult func(string, string, string, *testing.T) + preRun func(t *testing.T) (target, linkPath, dirName string) + postRun func(linkPath, dirName string, t *testing.T) + }{ + { + name: "target exists", + evalResult: func(target, link, result string, t *testing.T) { + if result != target { + t.Errorf("expected %q to match %q", result, target) + } + }, + preRun: func(t *testing.T) (string, string, string) { + tmpDir, err := ioutil.TempDir("", "existing") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + if err := ioutil.WriteFile(filepath.Join(tmpDir, "testfile"), []byte{1}, os.ModePerm); err != nil { + t.Fatalf("unable to write file: %v", err) + } + if err := os.Symlink(filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile")); err != nil { + t.Fatalf("unable to make symlink: %v", err) + } + return filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile"), tmpDir + }, + postRun: func(linkPath, dirName string, t *testing.T) { + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to remove %q: %v", dirName, err) + } + }, + }, + { + name: "target does not exist", + evalResult: func(target, link, result string, t *testing.T) { + if result != target { + t.Errorf("expected %q to match %q", result, target) + } + }, + preRun: func(t *testing.T) (string, string, string) { + tmpDir, err := ioutil.TempDir("", "broken") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + if err := os.Symlink(filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile")); err != nil { + t.Fatalf("unable to make symlink: %v", err) + } + return filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile"), tmpDir + }, + postRun: func(linkPath, dirName string, t *testing.T) { + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to remove %q: %v", dirName, err) + } + }, + }, + { + name: "source does not exist", + evalResult: func(target, link, result string, t *testing.T) { + if len(result) > 0 { + t.Errorf("expected result to be empty, got: %q", result) + } + }, + preRun: func(t *testing.T) (string, string, string) { + tmpDir, err := ioutil.TempDir("", "broken-source") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + return filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile"), tmpDir + }, + postRun: func(linkPath, dirName string, t *testing.T) { + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to remove %q: %v", dirName, err) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + target, link, tempDir := test.preRun(t) + result := readlink(link) + test.evalResult(target, link, result, t) + test.postRun(link, tempDir, t) + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/proc/reaper.go 
b/vendor/github.com/openshift/library-go/pkg/proc/reaper.go new file mode 100644 index 00000000000..21f5f71ff51 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/proc/reaper.go @@ -0,0 +1,37 @@ +// +build linux + +package proc + +import ( + "os" + "os/signal" + "syscall" + + "k8s.io/klog" +) + +// StartReaper starts a goroutine to reap processes if called from a process +// that has pid 1. +func StartReaper() { + if os.Getpid() == 1 { + klog.V(4).Infof("Launching reaper") + go func() { + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGCHLD) + for { + // Wait for a child to terminate + sig := <-sigs + klog.V(4).Infof("Signal received: %v", sig) + for { + // Reap processes + cpid, _ := syscall.Wait4(-1, nil, syscall.WNOHANG, nil) + if cpid < 1 { + break + } + + klog.V(4).Infof("Reaped process with pid %d", cpid) + } + } + }() + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go b/vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go new file mode 100644 index 00000000000..75644fa5a84 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/proc/reaper_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package proc + +// StartReaper has no effect on non-linux platforms. +// Support for other unices will be added. +func StartReaper() { +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go new file mode 100644 index 00000000000..712f9b8bcc7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go @@ -0,0 +1,409 @@ +package clusterquotamapping + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + kapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + quotav1 "github.com/openshift/api/quota/v1" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + quotalister "github.com/openshift/client-go/quota/listers/quota/v1" +) + +// Look out, here there be dragons! +// There is a race when dealing with the DeltaFifo compression used to back a reflector for a controller that uses two +// SharedInformers for both their watch events AND their caches. The scenario looks like this: +// +// 1. Add, Delete a namespace really fast, *before* the add is observed by the controller using the reflector. +// 2. Add or Update a quota that matches the Add namespace +// 3. The cache had the intermediate state for the namespace for some period of time. This makes the quota update the mapping, indicating a match. +// 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared. +// +// This sounds pretty bad; however, we fail in the "safe" direction and the consequences are detectable. +// When going from quota to namespace, you can get back a namespace that doesn't exist. There are no resources in a non-existent +// namespace, so you know to clear all referenced resources. 
In addition, this add/delete has to happen so fast +// that it would be nearly impossible for any resources to be created. If you do create resources, then we must be observing +// their deletes. When quota is replenished, we'll see that we need to clear any charges. +// +// When going from namespace to quota, you can get back a quota that doesn't exist. Since the cache is shared, +// we know that a missing quota means that there isn't anything for us to bill against, so we can skip it. +// +// If the mapping cache is wrong and a previously deleted quota or namespace is created, this controller +// correctly adds the items back to the list and clears out all previous mappings. +// +// In addition to those constraints, the timing threshold for actually hitting this problem is really tight. It's +// basically a script that is creating and deleting things as fast as it possibly can. Sub-millisecond in the fuzz +// test where I caught the problem. + +// NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas +func NewClusterQuotaMappingController(namespaceInformer corev1informers.NamespaceInformer, quotaInformer quotainformer.ClusterResourceQuotaInformer) *ClusterQuotaMappingController { + c := newClusterQuotaMappingController(namespaceInformer.Informer(), quotaInformer) + c.namespaceLister = v1NamespaceLister{lister: namespaceInformer.Lister()} + return c +} + +type namespaceLister interface { + Each(label labels.Selector, fn func(metav1.Object) bool) error + Get(name string) (metav1.Object, error) +} + +type v1NamespaceLister struct { + lister corev1listers.NamespaceLister +} + +func (l v1NamespaceLister) Each(label labels.Selector, fn func(metav1.Object) bool) error { + results, err := l.lister.List(label) + if err != nil { + return err + } + for i := range results { + if !fn(results[i]) { + return nil + } + } + return nil +} +func (l v1NamespaceLister) Get(name string) (metav1.Object, error) { + return l.lister.Get(name) +} + +func newClusterQuotaMappingController(namespaceInformer cache.SharedIndexInformer, quotaInformer quotainformer.ClusterResourceQuotaInformer) *ClusterQuotaMappingController { + c := &ClusterQuotaMappingController{ + namespaceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_clusterquotamappingcontroller_namespaces"), + quotaQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_clusterquotamappingcontroller_clusterquotas"), + clusterQuotaMapper: NewClusterQuotaMapper(), + } + namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addNamespace, + UpdateFunc: c.updateNamespace, + DeleteFunc: c.deleteNamespace, + }) + c.namespacesSynced = namespaceInformer.HasSynced + + quotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addQuota, + UpdateFunc: c.updateQuota, + DeleteFunc: c.deleteQuota, + }) + c.quotaLister = quotaInformer.Lister() + c.quotasSynced = quotaInformer.Informer().HasSynced + + return c +} + +type ClusterQuotaMappingController struct { + namespaceQueue workqueue.RateLimitingInterface + namespaceLister namespaceLister + namespacesSynced func() bool + + quotaQueue workqueue.RateLimitingInterface + quotaLister quotalister.ClusterResourceQuotaLister + quotasSynced func() bool + + clusterQuotaMapper *clusterQuotaMapper +} + +func (c *ClusterQuotaMappingController) GetClusterQuotaMapper() ClusterQuotaMapper { + return c.clusterQuotaMapper +} + +func (c 
*ClusterQuotaMappingController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.namespaceQueue.ShutDown() + defer c.quotaQueue.ShutDown() + + klog.Infof("Starting ClusterQuotaMappingController controller") + defer klog.Infof("Shutting down ClusterQuotaMappingController controller") + + if !cache.WaitForCacheSync(stopCh, c.namespacesSynced, c.quotasSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return + } + + klog.V(4).Infof("Starting workers for quota mapping controller workers") + for i := 0; i < workers; i++ { + go wait.Until(c.namespaceWorker, time.Second, stopCh) + go wait.Until(c.quotaWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *ClusterQuotaMappingController) syncQuota(quota *quotav1.ClusterResourceQuota) error { + matcherFunc, err := GetObjectMatcher(quota.Spec.Selector) + if err != nil { + return err + } + + if err := c.namespaceLister.Each(labels.Everything(), func(obj metav1.Object) bool { + // attempt to set the mapping. The quotas never collide with each other (same quota is never processed twice in parallel) + // so this means that the project we have is out of date, pull a more recent copy from the cache and retest + for { + matches, err := matcherFunc(obj) + if err != nil { + utilruntime.HandleError(err) + break + } + success, quotaMatches, _ := c.clusterQuotaMapper.setMapping(quota, obj, !matches) + if success { + break + } + + // if the quota is mismatched, then someone has updated the quota or has deleted the entry entirely. + // if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this + // execution is finished + if !quotaMatches { + return false + } + newer, err := c.namespaceLister.Get(obj.GetName()) + if kapierrors.IsNotFound(err) { + // if the namespace is gone, then the deleteNamespace path will be called, just continue + break + } + if err != nil { + utilruntime.HandleError(err) + break + } + obj = newer + } + return true + }); err != nil { + return err + } + + c.clusterQuotaMapper.completeQuota(quota) + return nil +} + +func (c *ClusterQuotaMappingController) syncNamespace(namespace metav1.Object) error { + allQuotas, err1 := c.quotaLister.List(labels.Everything()) + if err1 != nil { + return err1 + } + for i := range allQuotas { + quota := allQuotas[i] + + for { + matcherFunc, err := GetObjectMatcher(quota.Spec.Selector) + if err != nil { + utilruntime.HandleError(err) + break + } + + // attempt to set the mapping. The namespaces never collide with each other (same namespace is never processed twice in parallel) + // so this means that the quota we have is out of date, pull a more recent copy from the cache and retest + matches, err := matcherFunc(namespace) + if err != nil { + utilruntime.HandleError(err) + break + } + success, _, namespaceMatches := c.clusterQuotaMapper.setMapping(quota, namespace, !matches) + if success { + break + } + + // if the namespace is mismatched, then someone has updated the namespace or has deleted the entry entirely. + // if we've been updated, we'll be rekicked, if we've been deleted we should stop. 
Either way, this + // execution is finished + if !namespaceMatches { + return nil + } + + quota, err = c.quotaLister.Get(quota.Name) + if kapierrors.IsNotFound(err) { + // if the quota is gone, then the deleteQuota path will be called, just continue + break + } + if err != nil { + utilruntime.HandleError(err) + break + } + } + } + + c.clusterQuotaMapper.completeNamespace(namespace) + return nil +} + +func (c *ClusterQuotaMappingController) quotaWork() bool { + key, quit := c.quotaQueue.Get() + if quit { + return true + } + defer c.quotaQueue.Done(key) + + quota, err := c.quotaLister.Get(key.(string)) + if err != nil { + if errors.IsNotFound(err) { + c.quotaQueue.Forget(key) + return false + } + utilruntime.HandleError(err) + return false + } + + err = c.syncQuota(quota) + outOfRetries := c.quotaQueue.NumRequeues(key) > 5 + switch { + case err != nil && outOfRetries: + utilruntime.HandleError(err) + c.quotaQueue.Forget(key) + + case err != nil && !outOfRetries: + c.quotaQueue.AddRateLimited(key) + + default: + c.quotaQueue.Forget(key) + } + + return false +} + +func (c *ClusterQuotaMappingController) quotaWorker() { + for { + if quit := c.quotaWork(); quit { + return + } + } +} + +func (c *ClusterQuotaMappingController) namespaceWork() bool { + key, quit := c.namespaceQueue.Get() + if quit { + return true + } + defer c.namespaceQueue.Done(key) + + namespace, err := c.namespaceLister.Get(key.(string)) + if kapierrors.IsNotFound(err) { + c.namespaceQueue.Forget(key) + return false + } + if err != nil { + utilruntime.HandleError(err) + return false + } + + err = c.syncNamespace(namespace) + outOfRetries := c.namespaceQueue.NumRequeues(key) > 5 + switch { + case err != nil && outOfRetries: + utilruntime.HandleError(err) + c.namespaceQueue.Forget(key) + + case err != nil && !outOfRetries: + c.namespaceQueue.AddRateLimited(key) + + default: + c.namespaceQueue.Forget(key) + } + + return false +} + +func (c *ClusterQuotaMappingController) namespaceWorker() { + for { + if quit := c.namespaceWork(); quit { + return + } + } +} + +func (c *ClusterQuotaMappingController) deleteNamespace(obj interface{}) { + var name string + switch ns := obj.(type) { + case cache.DeletedFinalStateUnknown: + switch nested := ns.Obj.(type) { + case *corev1.Namespace: + name = nested.Name + default: + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %T", ns.Obj)) + return + } + case *corev1.Namespace: + name = ns.Name + default: + utilruntime.HandleError(fmt.Errorf("not a Namespace %v", obj)) + return + } + c.clusterQuotaMapper.removeNamespace(name) +} + +func (c *ClusterQuotaMappingController) addNamespace(cur interface{}) { + c.enqueueNamespace(cur) +} +func (c *ClusterQuotaMappingController) updateNamespace(old, cur interface{}) { + c.enqueueNamespace(cur) +} +func (c *ClusterQuotaMappingController) enqueueNamespace(obj interface{}) { + switch ns := obj.(type) { + case *corev1.Namespace: + if !c.clusterQuotaMapper.requireNamespace(ns) { + return + } + default: + utilruntime.HandleError(fmt.Errorf("not a Namespace %v", obj)) + return + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + c.namespaceQueue.Add(key) +} + +func (c *ClusterQuotaMappingController) deleteQuota(obj interface{}) { + quota, ok1 := obj.(*quotav1.ClusterResourceQuota) + if !ok1 { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %v", obj)) + return 
+ } + quota, ok = tombstone.Obj.(*quotav1.ClusterResourceQuota) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Quota %v", obj)) + return + } + } + + c.clusterQuotaMapper.removeQuota(quota.Name) +} + +func (c *ClusterQuotaMappingController) addQuota(cur interface{}) { + c.enqueueQuota(cur) +} +func (c *ClusterQuotaMappingController) updateQuota(old, cur interface{}) { + c.enqueueQuota(cur) +} +func (c *ClusterQuotaMappingController) enqueueQuota(obj interface{}) { + quota, ok := obj.(*quotav1.ClusterResourceQuota) + if !ok { + utilruntime.HandleError(fmt.Errorf("not a Quota %v", obj)) + return + } + if !c.clusterQuotaMapper.requireQuota(quota) { + return + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(quota) + if err != nil { + utilruntime.HandleError(err) + return + } + c.quotaQueue.Add(key) +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go new file mode 100644 index 00000000000..cffcb10f68a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go @@ -0,0 +1,319 @@ +package clusterquotamapping + +import ( + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + kexternalinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + + quotav1 "github.com/openshift/api/quota/v1" + quotaclient "github.com/openshift/client-go/quota/clientset/versioned/fake" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" +) + +var ( + keys = []string{"different", "used", "important", "every", "large"} + values = []string{"time", "person"} + annotationKeys = []string{"different", "used", "important", "every", "large", "foo.bar.baz/key", "whitespace key"} + annotationValues = []string{"Person", "time and place", "Thing", "me@example.com", "system:admin"} + namespaceNames = []string{ + "tokillamockingbird", "harrypotter", "1984", "prideandprejudice", "thediaryofayounggirl", "animalfarm", "thehobbit", + "thelittleprince", "thegreatgatsby", "thecatcherintherye", "lordoftherings", "janeeyre", "romeoandjuliet", "thechroniclesofnarnia", + "lordoftheflies", "thegivingtree", "charlottesweb", "greeneggsandham", "alicesadventuresinwonderland", "littlewomen", + "ofmiceandmend", "wutheringheights", "thehungergames", "gonewiththewind", "thepictureofdoriangray", "theadventuresofhuckleberryfinn", + "fahrenheit451", "hamlet", "thehitchhikersguidetothegalaxy", "bravenewworld", "lesmiserables", "crimeandpunishment", "memoirsofageisha", + } + quotaNames = []string{"emma", "olivia", "sophia", "ava", "isabella", "mia", "abigail", "emily", "charlotte", "harper"} + + maxSelectorKeys = 2 + maxLabels = 5 +) + +func TestClusterQuotaFuzzer(t *testing.T) { + for j := 0; j < 100; j++ { + t.Logf("attempt %d", (j + 1)) + runFuzzer(t) + } +} + +func runFuzzer(t *testing.T) { + stopCh := make(chan struct{}) + defer close(stopCh) + + startingNamespaces := CreateStartingNamespaces() + kubeClient := fake.NewSimpleClientset(startingNamespaces...) 
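+ // (editorial note) The fake watchers created below are wired into the clientsets' + // watch reactors, so objects pushed into nsWatch and quotaWatch via Add/Modify/Delete + // are exactly what the SharedInformers observe after their initial LIST.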
+ nsWatch := watch.NewFake() + kubeClient.PrependWatchReactor("namespaces", clientgotesting.DefaultWatchReactor(nsWatch, nil)) + + kubeInformerFactory := kexternalinformers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + + startingQuotas := CreateStartingQuotas() + quotaWatch := watch.NewFake() + quotaClient := quotaclient.NewSimpleClientset(startingQuotas...) + quotaClient.PrependWatchReactor("clusterresourcequotas", clientgotesting.DefaultWatchReactor(quotaWatch, nil)) + quotaFactory := quotainformer.NewSharedInformerFactory(quotaClient, 0) + + controller := NewClusterQuotaMappingController(kubeInformerFactory.Core().V1().Namespaces(), quotaFactory.Quota().V1().ClusterResourceQuotas()) + go controller.Run(5, stopCh) + quotaFactory.Start(stopCh) + kubeInformerFactory.Start(stopCh) + + finalNamespaces := map[string]*corev1.Namespace{} + finalQuotas := map[string]*quotav1.ClusterResourceQuota{} + quotaActions := map[string][]string{} + namespaceActions := map[string][]string{} + finishedNamespaces := make(chan struct{}) + finishedQuotas := make(chan struct{}) + + for _, quota := range startingQuotas { + name := quota.(*quotav1.ClusterResourceQuota).Name + quotaActions[name] = append(quotaActions[name], fmt.Sprintf("inserting %v to %v", name, quota.(*quotav1.ClusterResourceQuota).Spec.Selector)) + finalQuotas[name] = quota.(*quotav1.ClusterResourceQuota) + } + for _, namespace := range startingNamespaces { + name := namespace.(*corev1.Namespace).Name + namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("inserting %v to %v", name, namespace.(*corev1.Namespace).Labels)) + finalNamespaces[name] = namespace.(*corev1.Namespace) + } + + go func() { + for i := 0; i < 200; i++ { + name := quotaNames[rand.Intn(len(quotaNames))] + _, exists := finalQuotas[name] + if rand.Intn(50) == 0 { + if !exists { + continue + } + // due to the compression race (see big comment for impl), clear the queue then delete + for { + if len(quotaWatch.ResultChan()) == 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + + quotaActions[name] = append(quotaActions[name], "deleting "+name) + quotaWatch.Delete(finalQuotas[name]) + delete(finalQuotas, name) + continue + } + + quota := NewQuota(name) + finalQuotas[name] = quota + copied := quota.DeepCopy() + if exists { + quotaActions[name] = append(quotaActions[name], fmt.Sprintf("updating %v to %v", name, quota.Spec.Selector)) + quotaWatch.Modify(copied) + } else { + quotaActions[name] = append(quotaActions[name], fmt.Sprintf("adding %v to %v", name, quota.Spec.Selector)) + quotaWatch.Add(copied) + } + } + close(finishedQuotas) + }() + + go func() { + for i := 0; i < 200; i++ { + name := namespaceNames[rand.Intn(len(namespaceNames))] + _, exists := finalNamespaces[name] + if rand.Intn(50) == 0 { + if !exists { + continue + } + // due to the compression race (see big comment for impl), clear the queue then delete + for { + if len(nsWatch.ResultChan()) == 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + + namespaceActions[name] = append(namespaceActions[name], "deleting "+name) + nsWatch.Delete(finalNamespaces[name]) + delete(finalNamespaces, name) + continue + } + + ns := NewNamespace(name) + finalNamespaces[name] = ns + copied := ns.DeepCopy() + if exists { + namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("updating %v to %v", name, ns.Labels)) + nsWatch.Modify(copied) + } else { + namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("adding %v to %v", name, ns.Labels)) + nsWatch.Add(copied) + } + } + 
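// (editorial note) all namespace mutations have been handed to the fake watcher; + // signal completion to the main test goroutine +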
close(finishedNamespaces) + }() + + <-finishedQuotas + <-finishedNamespaces + + finalFailures := []string{} + for i := 0; i < 200; i++ { + // better suggestions for testing doneness? Check the condition a few times? + time.Sleep(50 * time.Millisecond) + + finalFailures = checkState(controller, finalNamespaces, finalQuotas, t, quotaActions, namespaceActions) + if len(finalFailures) == 0 { + break + } + } + + if len(finalFailures) > 0 { + t.Logf("have %d quotas and %d namespaces", len(quotaWatch.ResultChan()), len(nsWatch.ResultChan())) + t.Fatalf("failed on \n%v", strings.Join(finalFailures, "\n")) + } +} + +func checkState(controller *ClusterQuotaMappingController, finalNamespaces map[string]*corev1.Namespace, finalQuotas map[string]*quotav1.ClusterResourceQuota, t *testing.T, quotaActions, namespaceActions map[string][]string) []string { + failures := []string{} + + quotaToNamespaces := map[string]sets.String{} + for _, quotaName := range quotaNames { + quotaToNamespaces[quotaName] = sets.String{} + } + namespacesToQuota := map[string]sets.String{} + for _, namespaceName := range namespaceNames { + namespacesToQuota[namespaceName] = sets.String{} + } + for _, quota := range finalQuotas { + matcherFunc, err := GetMatcher(quota.Spec.Selector) + if err != nil { + t.Fatal(err) + } + for _, namespace := range finalNamespaces { + if matches, _ := matcherFunc(namespace); matches { + quotaToNamespaces[quota.Name].Insert(namespace.Name) + namespacesToQuota[namespace.Name].Insert(quota.Name) + } + } + } + + for _, quotaName := range quotaNames { + namespaces, selector := controller.clusterQuotaMapper.GetNamespacesFor(quotaName) + nsSet := sets.NewString(namespaces...) + if !nsSet.Equal(quotaToNamespaces[quotaName]) { + failures = append(failures, fmt.Sprintf("quota %v, expected %v, got %v", quotaName, quotaToNamespaces[quotaName].List(), nsSet.List())) + failures = append(failures, quotaActions[quotaName]...) + } + if quota, ok := finalQuotas[quotaName]; ok && !reflect.DeepEqual(quota.Spec.Selector, selector) { + failures = append(failures, fmt.Sprintf("quota %v, expected %v, got %v", quotaName, quota.Spec.Selector, selector)) + } + } + + for _, namespaceName := range namespaceNames { + quotas, selectionFields := controller.clusterQuotaMapper.GetClusterQuotasFor(namespaceName) + quotaSet := sets.NewString(quotas...) + if !quotaSet.Equal(namespacesToQuota[namespaceName]) { + failures = append(failures, fmt.Sprintf("namespace %v, expected %v, got %v", namespaceName, namespacesToQuota[namespaceName].List(), quotaSet.List())) + failures = append(failures, namespaceActions[namespaceName]...) 
+ } + if namespace, ok := finalNamespaces[namespaceName]; ok && !reflect.DeepEqual(GetSelectionFields(namespace), selectionFields) { + failures = append(failures, fmt.Sprintf("namespace %v, expected %v, got %v", namespaceName, GetSelectionFields(namespace), selectionFields)) + } + } + + return failures +} + +func CreateStartingQuotas() []runtime.Object { + count := rand.Intn(len(quotaNames)) + used := sets.String{} + ret := []runtime.Object{} + + for i := 0; i < count; i++ { + name := quotaNames[rand.Intn(len(quotaNames))] + if !used.Has(name) { + ret = append(ret, NewQuota(name)) + used.Insert(name) + } + } + + return ret +} + +func CreateStartingNamespaces() []runtime.Object { + count := rand.Intn(len(namespaceNames)) + used := sets.String{} + ret := []runtime.Object{} + + for i := 0; i < count; i++ { + name := namespaceNames[rand.Intn(len(namespaceNames))] + if !used.Has(name) { + ret = append(ret, NewNamespace(name)) + used.Insert(name) + } + } + + return ret +} + +func NewQuota(name string) *quotav1.ClusterResourceQuota { + ret := &quotav1.ClusterResourceQuota{} + ret.Name = name + + numSelectorKeys := rand.Intn(maxSelectorKeys) + 1 + if numSelectorKeys == 0 { + return ret + } + + ret.Spec.Selector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} + for i := 0; i < numSelectorKeys; i++ { + key := keys[rand.Intn(len(keys))] + value := values[rand.Intn(len(values))] + + ret.Spec.Selector.LabelSelector.MatchLabels[key] = value + } + + ret.Spec.Selector.AnnotationSelector = map[string]string{} + for i := 0; i < numSelectorKeys; i++ { + key := annotationKeys[rand.Intn(len(annotationKeys))] + value := annotationValues[rand.Intn(len(annotationValues))] + + ret.Spec.Selector.AnnotationSelector[key] = value + } + + return ret +} + +func NewNamespace(name string) *corev1.Namespace { + ret := &corev1.Namespace{} + ret.Name = name + + numLabels := rand.Intn(maxLabels) + 1 + if numLabels == 0 { + return ret + } + + ret.Labels = map[string]string{} + for i := 0; i < numLabels; i++ { + key := keys[rand.Intn(len(keys))] + value := values[rand.Intn(len(values))] + + ret.Labels[key] = value + } + + ret.Annotations = map[string]string{} + for i := 0; i < numLabels; i++ { + key := annotationKeys[rand.Intn(len(annotationKeys))] + value := annotationValues[rand.Intn(len(annotationValues))] + + ret.Annotations[key] = value + } + + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go new file mode 100644 index 00000000000..0c2c2ae7a5c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go @@ -0,0 +1,139 @@ +package clusterquotamapping + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func GetResourceQuotasStatusByNamespace(namespaceStatuses quotav1.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.Status, true + } + } + return corev1.ResourceQuotaStatus{}, false +} + +func RemoveResourceQuotasStatusByNamespace(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + for i 
:= range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func InsertResourceQuotasStatus(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, newStatus quotav1.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} + +var accessor = meta.NewAccessor() + +func GetMatcher(selector quotav1.ClusterResourceQuotaSelector) (func(obj runtime.Object) (bool, error), error) { + var labelSelector labels.Selector + if selector.LabelSelector != nil { + var err error + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + } + + var annotationSelector map[string]string + if len(selector.AnnotationSelector) > 0 { + // ensure our matcher has a stable copy of the map + annotationSelector = make(map[string]string, len(selector.AnnotationSelector)) + for k, v := range selector.AnnotationSelector { + annotationSelector[k] = v + } + } + + return func(obj runtime.Object) (bool, error) { + if labelSelector != nil { + objLabels, err := accessor.Labels(obj) + if err != nil { + return false, err + } + if !labelSelector.Matches(labels.Set(objLabels)) { + return false, nil + } + } + + if annotationSelector != nil { + objAnnotations, err := accessor.Annotations(obj) + if err != nil { + return false, err + } + for k, v := range annotationSelector { + if objValue, exists := objAnnotations[k]; !exists || objValue != v { + return false, nil + } + } + } + + return true, nil + }, nil +} + +func GetObjectMatcher(selector quotav1.ClusterResourceQuotaSelector) (func(obj metav1.Object) (bool, error), error) { + var labelSelector labels.Selector + if selector.LabelSelector != nil { + var err error + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + } + + var annotationSelector map[string]string + if len(selector.AnnotationSelector) > 0 { + // ensure our matcher has a stable copy of the map + annotationSelector = make(map[string]string, len(selector.AnnotationSelector)) + for k, v := range selector.AnnotationSelector { + annotationSelector[k] = v + } + } + + return func(obj metav1.Object) (bool, error) { + if labelSelector != nil { + if !labelSelector.Matches(labels.Set(obj.GetLabels())) { + return false, nil + } + } + + if annotationSelector != nil { + objAnnotations := obj.GetAnnotations() + for k, v := range annotationSelector { + if objValue, exists := objAnnotations[k]; !exists || objValue != v { + return false, nil + } + } + } + + return true, nil + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go new file mode 100644 index 00000000000..e8d66c4fa2a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go @@ -0,0 +1,289 @@ +package 
clusterquotamapping + +import ( + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + quotav1 "github.com/openshift/api/quota/v1" +) + +type ClusterQuotaMapper interface { + // GetClusterQuotasFor returns the list of clusterquota names that this namespace matches. It also + // returns the selectionFields associated with the namespace for the check so that callers can determine staleness + GetClusterQuotasFor(namespaceName string) ([]string, SelectionFields) + // GetNamespacesFor returns the list of namespace names that this cluster quota matches. It also + // returns the selector associated with the clusterquota for the check so that callers can determine staleness + GetNamespacesFor(quotaName string) ([]string, quotav1.ClusterResourceQuotaSelector) + + AddListener(listener MappingChangeListener) +} + +// MappingChangeListener is notified of changes to the mapping. It must not block. +type MappingChangeListener interface { + AddMapping(quotaName, namespaceName string) + RemoveMapping(quotaName, namespaceName string) +} + +type SelectionFields struct { + Labels map[string]string + Annotations map[string]string +} + +// clusterQuotaMapper gives thread safe access to the actual mappings that are being stored. +// Many methods use a shareable read lock to check status, followed by a non-shareable +// write lock which double-checks the condition before proceeding. Since locks aren't escalatable, +// you have to perform the recheck because someone could have beaten you to it. +type clusterQuotaMapper struct { + lock sync.RWMutex + + // requiredQuotaToSelector indicates the latest label selector this controller has observed for a quota + requiredQuotaToSelector map[string]quotav1.ClusterResourceQuotaSelector + // requiredNamespaceToLabels indicates the latest selectionFields this controller has observed for a namespace + requiredNamespaceToLabels map[string]SelectionFields + // completedQuotaToSelector indicates the latest label selector this controller has scanned against namespaces + completedQuotaToSelector map[string]quotav1.ClusterResourceQuotaSelector + // completedNamespaceToLabels indicates the latest selectionFields this controller has scanned against cluster quotas + completedNamespaceToLabels map[string]SelectionFields + + quotaToNamespaces map[string]sets.String + namespaceToQuota map[string]sets.String + + listeners []MappingChangeListener +} + +func NewClusterQuotaMapper() *clusterQuotaMapper { + return &clusterQuotaMapper{ + requiredQuotaToSelector: map[string]quotav1.ClusterResourceQuotaSelector{}, + requiredNamespaceToLabels: map[string]SelectionFields{}, + completedQuotaToSelector: map[string]quotav1.ClusterResourceQuotaSelector{}, + completedNamespaceToLabels: map[string]SelectionFields{}, + + quotaToNamespaces: map[string]sets.String{}, + namespaceToQuota: map[string]sets.String{}, + } +} + +func (m *clusterQuotaMapper) GetClusterQuotasFor(namespaceName string) ([]string, SelectionFields) { + m.lock.RLock() + defer m.lock.RUnlock() + + quotas, ok := m.namespaceToQuota[namespaceName] + if !ok { + return []string{}, m.completedNamespaceToLabels[namespaceName] + } + return quotas.List(), m.completedNamespaceToLabels[namespaceName] +} + +func (m *clusterQuotaMapper) GetNamespacesFor(quotaName string) ([]string, quotav1.ClusterResourceQuotaSelector) { + m.lock.RLock() + defer m.lock.RUnlock() + + namespaces, ok := m.quotaToNamespaces[quotaName] + if !ok { + return []string{}, 
m.completedQuotaToSelector[quotaName] + } + return namespaces.List(), m.completedQuotaToSelector[quotaName] +} + +func (m *clusterQuotaMapper) AddListener(listener MappingChangeListener) { + m.lock.Lock() + defer m.lock.Unlock() + + m.listeners = append(m.listeners, listener) +} + +// requireQuota updates the selector requirements for the given quota. This prevents stale updates to the mapping itself. +// returns true if a modification was made +func (m *clusterQuotaMapper) requireQuota(quota *quotav1.ClusterResourceQuota) bool { + m.lock.RLock() + selector, exists := m.requiredQuotaToSelector[quota.Name] + m.lock.RUnlock() + + if selectorMatches(selector, exists, quota) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + selector, exists = m.requiredQuotaToSelector[quota.Name] + if selectorMatches(selector, exists, quota) { + return false + } + + m.requiredQuotaToSelector[quota.Name] = quota.Spec.Selector + return true +} + +// completeQuota updates the latest selector used to generate the mappings for this quota. The value is returned +// by the Get methods for the mapping so that callers can determine staleness +func (m *clusterQuotaMapper) completeQuota(quota *quotav1.ClusterResourceQuota) { + m.lock.Lock() + defer m.lock.Unlock() + m.completedQuotaToSelector[quota.Name] = quota.Spec.Selector +} + +// removeQuota deletes a quota from all mappings +func (m *clusterQuotaMapper) removeQuota(quotaName string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.requiredQuotaToSelector, quotaName) + delete(m.completedQuotaToSelector, quotaName) + delete(m.quotaToNamespaces, quotaName) + for namespaceName, quotas := range m.namespaceToQuota { + if quotas.Has(quotaName) { + quotas.Delete(quotaName) + for _, listener := range m.listeners { + listener.RemoveMapping(quotaName, namespaceName) + } + } + } +} + +// requireNamespace updates the label requirements for the given namespace. This prevents stale updates to the mapping itself. +// returns true if a modification was made +func (m *clusterQuotaMapper) requireNamespace(namespace metav1.Object) bool { + m.lock.RLock() + selectionFields, exists := m.requiredNamespaceToLabels[namespace.GetName()] + m.lock.RUnlock() + + if selectionFieldsMatch(selectionFields, exists, namespace) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + selectionFields, exists = m.requiredNamespaceToLabels[namespace.GetName()] + if selectionFieldsMatch(selectionFields, exists, namespace) { + return false + } + + m.requiredNamespaceToLabels[namespace.GetName()] = GetSelectionFields(namespace) + return true +} + +// completeNamespace updates the latest selectionFields used to generate the mappings for this namespace. 
The value is returned +// by the Get methods for the mapping so that callers can determine staleness +func (m *clusterQuotaMapper) completeNamespace(namespace metav1.Object) { + m.lock.Lock() + defer m.lock.Unlock() + m.completedNamespaceToLabels[namespace.GetName()] = GetSelectionFields(namespace) +} + +// removeNamespace deletes a namespace from all mappings +func (m *clusterQuotaMapper) removeNamespace(namespaceName string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.requiredNamespaceToLabels, namespaceName) + delete(m.completedNamespaceToLabels, namespaceName) + delete(m.namespaceToQuota, namespaceName) + for quotaName, namespaces := range m.quotaToNamespaces { + if namespaces.Has(namespaceName) { + namespaces.Delete(namespaceName) + for _, listener := range m.listeners { + listener.RemoveMapping(quotaName, namespaceName) + } + } + } +} + +func selectorMatches(selector quotav1.ClusterResourceQuotaSelector, exists bool, quota *quotav1.ClusterResourceQuota) bool { + return exists && equality.Semantic.DeepEqual(selector, quota.Spec.Selector) +} +func selectionFieldsMatch(selectionFields SelectionFields, exists bool, namespace metav1.Object) bool { + return exists && reflect.DeepEqual(selectionFields, GetSelectionFields(namespace)) +} + +// setMapping maps (or removes a mapping) between a clusterquota and a namespace +// It returns whether the action worked, whether the quota is out of date, whether the namespace is out of date +// This allows callers to decide whether to pull new information from the cache or simply skip execution +func (m *clusterQuotaMapper) setMapping(quota *quotav1.ClusterResourceQuota, namespace metav1.Object, remove bool) (bool /*added*/, bool /*quota matches*/, bool /*namespace matches*/) { + m.lock.RLock() + selector, selectorExists := m.requiredQuotaToSelector[quota.Name] + selectionFields, selectionFieldsExist := m.requiredNamespaceToLabels[namespace.GetName()] + m.lock.RUnlock() + + if !selectorMatches(selector, selectorExists, quota) { + return false, false, selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) + } + if !selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) { + return false, true, false + } + + m.lock.Lock() + defer m.lock.Unlock() + selector, selectorExists = m.requiredQuotaToSelector[quota.Name] + selectionFields, selectionFieldsExist = m.requiredNamespaceToLabels[namespace.GetName()] + if !selectorMatches(selector, selectorExists, quota) { + return false, false, selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) + } + if !selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) { + return false, true, false + } + + if remove { + mutated := false + + namespaces, ok := m.quotaToNamespaces[quota.Name] + if !ok { + m.quotaToNamespaces[quota.Name] = sets.String{} + } else { + mutated = namespaces.Has(namespace.GetName()) + namespaces.Delete(namespace.GetName()) + } + + quotas, ok := m.namespaceToQuota[namespace.GetName()] + if !ok { + m.namespaceToQuota[namespace.GetName()] = sets.String{} + } else { + mutated = mutated || quotas.Has(quota.Name) + quotas.Delete(quota.Name) + } + + if mutated { + for _, listener := range m.listeners { + listener.RemoveMapping(quota.Name, namespace.GetName()) + } + } + + return true, true, true + } + + mutated := false + + namespaces, ok := m.quotaToNamespaces[quota.Name] + if !ok { + mutated = true + m.quotaToNamespaces[quota.Name] = sets.NewString(namespace.GetName()) + } else { + mutated = !namespaces.Has(namespace.GetName()) + 
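// (editorial note) mutated records whether this namespace was missing before the + // insert, so listeners are only notified on a real change +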
namespaces.Insert(namespace.GetName()) + } + + quotas, ok := m.namespaceToQuota[namespace.GetName()] + if !ok { + mutated = true + m.namespaceToQuota[namespace.GetName()] = sets.NewString(quota.Name) + } else { + mutated = mutated || !quotas.Has(quota.Name) + quotas.Insert(quota.Name) + } + + if mutated { + for _, listener := range m.listeners { + listener.AddMapping(quota.Name, namespace.GetName()) + } + } + + return true, true, true + +} + +func GetSelectionFields(namespace metav1.Object) SelectionFields { + return SelectionFields{Labels: namespace.GetLabels(), Annotations: namespace.GetAnnotations()} +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go new file mode 100644 index 00000000000..14faf6bc025 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go @@ -0,0 +1,42 @@ +package quotautil + +import ( + "strings" + + apierrs "k8s.io/apimachinery/pkg/api/errors" +) + +// The err*MessageString constants below are fragments of error messages copied from the +// quotaAdmission.Admit() method in the k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go module +const errQuotaMessageString = `exceeded quota:` +const errQuotaUnknownMessageString = `status unknown for quota:` +const errLimitsMessageString = `exceeds the maximum limit` + +// IsErrorQuotaExceeded returns true if the given error stands for a denied request caused by detected quota +// abuse. +func IsErrorQuotaExceeded(err error) bool { + if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) { + lowered := strings.ToLower(err.Error()) + // the limit error message can be accompanied only by Invalid reason + if strings.Contains(lowered, errLimitsMessageString) { + return true + } + // the quota error message can be accompanied only by Forbidden reason + if isForbidden && (strings.Contains(lowered, errQuotaMessageString) || strings.Contains(lowered, errQuotaUnknownMessageString)) { + return true + } + } + return false +}
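+ +// Editorial note, not part of the vendored file: a hedged sketch of how these +// predicates are typically consumed; createProject here is hypothetical: +// +// if err := createProject(); err != nil { +// if IsErrorQuotaExceeded(err) { +// // quota is exhausted (or its status is unknown); requeue and retry later +// } +// } + +// IsErrorLimitExceeded returns true if the given error is a limit error. 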
+func IsErrorLimitExceeded(err error) bool { + if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) { + lowered := strings.ToLower(err.Error()) + // the limit error message can be accompanied only by Invalid reason + if strings.Contains(lowered, errLimitsMessageString) { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go new file mode 100644 index 00000000000..a6bfc6269ed --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go @@ -0,0 +1,48 @@ +package quotautil + +import ( + corev1 "k8s.io/api/core/v1" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func GetResourceQuotasStatusByNamespace(namespaceStatuses quotav1.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.Status, true + } + } + return corev1.ResourceQuotaStatus{}, false +} + +func RemoveResourceQuotasStatusByNamespace(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func InsertResourceQuotasStatus(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, newStatus quotav1.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go new file mode 100644 index 00000000000..81c9b50211c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go @@ -0,0 +1,119 @@ +package ldapclient + +import ( + "crypto/tls" + "fmt" + "net" + + "github.com/openshift/library-go/pkg/security/ldaputil" + "k8s.io/client-go/util/cert" + + "gopkg.in/ldap.v2" +) + +// NewLDAPClientConfig returns a new LDAP client config +func NewLDAPClientConfig(URL, bindDN, bindPassword, CA string, insecure bool) (Config, error) { + url, err := ldaputil.ParseURL(URL) + if err != nil { + return nil, fmt.Errorf("Error parsing URL: %v", err) + } + + tlsConfig := &tls.Config{} + if len(CA) > 0 { + roots, err := cert.NewPool(CA) + if err != nil { + return nil, fmt.Errorf("error loading cert pool from ca file %s: %v", CA, err) + } + tlsConfig.RootCAs = roots + } + + return &ldapClientConfig{ + scheme: url.Scheme, + host: url.Host, + bindDN: bindDN, + bindPassword: bindPassword, + insecure: insecure, + tlsConfig: tlsConfig, + }, nil +} + +// ldapClientConfig holds information for connecting to an LDAP server +type ldapClientConfig struct { + // scheme is the LDAP connection scheme, 
either ldap or ldaps + scheme ldaputil.Scheme + // host is the host:port of the LDAP server + host string + // bindDN is an optional DN to bind with during the search phase. + bindDN string + // bindPassword is an optional password to bind with during the search phase. + bindPassword string + // insecure specifies if TLS is required for the connection. If true, either an ldap://... URL or + // StartTLS must be supported by the server + insecure bool + // tlsConfig holds the TLS options. Only used when insecure=false + tlsConfig *tls.Config +} + +// ldapClientConfig is a Config +var _ Config = &ldapClientConfig{} + +// Connect returns an established LDAP connection, or an error if the connection could not +// be made (or successfully upgraded to TLS). If no error is returned, the caller is responsible for +// closing the connection +func (l *ldapClientConfig) Connect() (ldap.Client, error) { + tlsConfig := l.tlsConfig + + // Ensure tlsConfig specifies the server we're connecting to + if tlsConfig != nil && !tlsConfig.InsecureSkipVerify && len(tlsConfig.ServerName) == 0 { + // Add to a copy of the tlsConfig to avoid mutating the original + c := tlsConfig.Clone() + if host, _, err := net.SplitHostPort(l.host); err == nil { + c.ServerName = host + } else { + c.ServerName = l.host + } + tlsConfig = c + } + + switch l.scheme { + case ldaputil.SchemeLDAP: + con, err := ldap.Dial("tcp", l.host) + if err != nil { + return nil, err + } + + // If an insecure connection is desired, we're done + if l.insecure { + return con, nil + } + + // Attempt to upgrade to TLS + if err := con.StartTLS(tlsConfig); err != nil { + // We're returning an error on a successfully opened connection + // We are responsible for closing the open connection + con.Close() + return nil, err + } + + return con, nil + + case ldaputil.SchemeLDAPS: + return ldap.DialTLS("tcp", l.host, tlsConfig) + + default: + return nil, fmt.Errorf("unsupported scheme %q", l.scheme) + } +} + +func (l *ldapClientConfig) GetBindCredentials() (string, string) { + return l.bindDN, l.bindPassword +} + +func (l *ldapClientConfig) Host() string { + return l.host +} + +// String implements Stringer for debugging purposes +func (l *ldapClientConfig) String() string { + return fmt.Sprintf("{Scheme: %v Host: %v BindDN: %v len(bindPassword): %v Insecure: %v}", l.scheme, l.host, l.bindDN, len(l.bindPassword), l.insecure) +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go new file mode 100644 index 00000000000..0c4efa23853 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go @@ -0,0 +1,10 @@ +package ldapclient + +import "gopkg.in/ldap.v2" + +// Config knows how to connect to an LDAP server and can describe which server it is connecting to +type Config interface { + Connect() (client ldap.Client, err error) + GetBindCredentials() (bindDN, bindPassword string) + Host() string +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go new file mode 100644 index 00000000000..cbc946f1405 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go @@ -0,0 +1,81 @@ +package ldapquery + +import ( + "fmt" + + "gopkg.in/ldap.v2" +) + +func NewNoSuchObjectError(baseDN string) error { + return &errNoSuchObject{baseDN: baseDN} +} + +// errNoSuchObject is an error that occurs 
when a base DN for a search refers to an object that does not exist +type errNoSuchObject struct { + baseDN string +} + +// Error returns the error string for the invalid base DN query error +func (e *errNoSuchObject) Error() string { + return fmt.Sprintf("search for entry with base dn=%q refers to a non-existent entry", e.baseDN) +} + +// IsNoSuchObjectError determines if the error is a NoSuchObjectError or if it is the upstream version of the error +// If this returns true, you are *not* safe to cast the error to a NoSuchObjectError +func IsNoSuchObjectError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*errNoSuchObject) + return ok || ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) +} + +func NewEntryNotFoundError(baseDN, filter string) error { + return &errEntryNotFound{baseDN: baseDN, filter: filter} +} + +// errEntryNotFound is an error that occurs when trying to find a specific entry fails. +type errEntryNotFound struct { + baseDN string + filter string +} + +// Error returns the error string for the entry not found error +func (e *errEntryNotFound) Error() string { + return fmt.Sprintf("search for entry with base dn=%q and filter %q did not return any results", e.baseDN, e.filter) +} + +func IsEntryNotFoundError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*errEntryNotFound) + return ok +} + +func NewQueryOutOfBoundsError(queryDN, baseDN string) error { + return &errQueryOutOfBounds{baseDN: baseDN, queryDN: queryDN} +} + +// errQueryOutOfBounds is an error that occurs when trying to search by DN for an entry that exists +// outside of the tree specified with the BaseDN for search. +type errQueryOutOfBounds struct { + baseDN string + queryDN string +} + +// Error returns the error string for the out-of-bounds query +func (q *errQueryOutOfBounds) Error() string { + return fmt.Sprintf("search for entry with dn=%q would search outside of the base dn specified (dn=%q)", q.queryDN, q.baseDN) +} + +func IsQueryOutOfBoundsError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*errQueryOutOfBounds) + return ok +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go new file mode 100644 index 00000000000..5e2e57fab6a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go @@ -0,0 +1,248 @@ +package ldapquery + +import ( + "fmt" + "strings" + + "gopkg.in/ldap.v2" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/security/ldapclient" + "github.com/openshift/library-go/pkg/security/ldaputil" +) + +// NewLDAPQuery converts a user-provided LDAPQuery into a version we can use +func NewLDAPQuery(config SerializeableLDAPQuery) (LDAPQuery, error) { + scope, err := ldaputil.DetermineLDAPScope(config.Scope) + if err != nil { + return LDAPQuery{}, err + } + + derefAliases, err := ldaputil.DetermineDerefAliasesBehavior(config.DerefAliases) + if err != nil { + return LDAPQuery{}, err + } + + return LDAPQuery{ + BaseDN: config.BaseDN, + Scope: scope, + DerefAliases: derefAliases, + TimeLimit: config.TimeLimit, + Filter: config.Filter, + PageSize: config.PageSize, + }, nil +} + +// LDAPQuery encodes an LDAP query +type LDAPQuery struct { + // The DN of the branch of the directory where all searches should start from + BaseDN string + + // The (optional) scope of the search. 
diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go
new file mode 100644
index 00000000000..5e2e57fab6a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go
@@ -0,0 +1,248 @@
+package ldapquery
+
+import (
+	"fmt"
+	"strings"
+
+	"gopkg.in/ldap.v2"
+	"k8s.io/klog"
+
+	"github.com/openshift/library-go/pkg/security/ldapclient"
+	"github.com/openshift/library-go/pkg/security/ldaputil"
+)
+
+// NewLDAPQuery converts a user-provided LDAPQuery into a version we can use
+func NewLDAPQuery(config SerializeableLDAPQuery) (LDAPQuery, error) {
+	scope, err := ldaputil.DetermineLDAPScope(config.Scope)
+	if err != nil {
+		return LDAPQuery{}, err
+	}
+
+	derefAliases, err := ldaputil.DetermineDerefAliasesBehavior(config.DerefAliases)
+	if err != nil {
+		return LDAPQuery{}, err
+	}
+
+	return LDAPQuery{
+		BaseDN:       config.BaseDN,
+		Scope:        scope,
+		DerefAliases: derefAliases,
+		TimeLimit:    config.TimeLimit,
+		Filter:       config.Filter,
+		PageSize:     config.PageSize,
+	}, nil
+}
+
+// LDAPQuery encodes an LDAP query
+type LDAPQuery struct {
+	// The DN of the branch of the directory where all searches should start from
+	BaseDN string
+
+	// The (optional) scope of the search. Defaults to the entire subtree if not set
+	Scope ldaputil.Scope
+
+	// The (optional) behavior of the search with regard to aliases. Defaults to always
+	// dereferencing if not set
+	DerefAliases ldaputil.DerefAliases
+
+	// TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding
+	// before the wait for a response is given up. If this is 0, no client-side limit is imposed
+	TimeLimit int
+
+	// Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN
+	Filter string
+
+	// PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.
+	PageSize int
+}
+
+// NewSearchRequest creates a new search request for the LDAP query and optionally includes more attributes
+func (q *LDAPQuery) NewSearchRequest(additionalAttributes []string) *ldap.SearchRequest {
+	var controls []ldap.Control
+	if q.PageSize > 0 {
+		controls = append(controls, ldap.NewControlPaging(uint32(q.PageSize)))
+	}
+	return ldap.NewSearchRequest(
+		q.BaseDN,
+		int(q.Scope),
+		int(q.DerefAliases),
+		0, // allowed return size - indicates no limit
+		q.TimeLimit,
+		false, // not types only
+		q.Filter,
+		additionalAttributes,
+		controls,
+	)
+}
+
+// NewLDAPQueryOnAttribute converts a user-provided LDAPQuery into a version we can use by parsing
+// the input and combining it with a set of name attributes
+func NewLDAPQueryOnAttribute(config SerializeableLDAPQuery, attribute string) (LDAPQueryOnAttribute, error) {
+	ldapQuery, err := NewLDAPQuery(config)
+	if err != nil {
+		return LDAPQueryOnAttribute{}, err
+	}
+
+	return LDAPQueryOnAttribute{
+		LDAPQuery:      ldapQuery,
+		QueryAttribute: attribute,
+	}, nil
+}
+
+// LDAPQueryOnAttribute encodes an LDAP query that conjoins two filters to extract a specific LDAP entry
+// This query is not self-sufficient and needs the value of the QueryAttribute to construct the final filter
+type LDAPQueryOnAttribute struct {
+	// Query retrieves entries from an LDAP server
+	LDAPQuery
+
+	// QueryAttribute is the attribute for a specific filter that, when conjoined with the common filter,
+	// retrieves the specific LDAP entry from the LDAP server. (e.g. "cn", when formatted with "aGroupName"
+	// and conjoined with "objectClass=groupOfNames", becomes (&(objectClass=groupOfNames)(cn=aGroupName))")
+	QueryAttribute string
+}
+
+// NewSearchRequest creates a new search request from the identifying query by internalizing the value of
+// the attribute to be filtered as well as any attributes that need to be recovered
+func (o *LDAPQueryOnAttribute) NewSearchRequest(attributeValue string, attributes []string) (*ldap.SearchRequest, error) {
+	if strings.EqualFold(o.QueryAttribute, "dn") {
+		dn, err := ldap.ParseDN(attributeValue)
+		if err != nil {
+			return nil, fmt.Errorf("could not search by dn, invalid dn value: %v", err)
+		}
+		baseDN, err := ldap.ParseDN(o.BaseDN)
+		if err != nil {
+			return nil, fmt.Errorf("could not search by dn, invalid dn value: %v", err)
+		}
+		if !baseDN.AncestorOf(dn) && !baseDN.Equal(dn) {
+			return nil, NewQueryOutOfBoundsError(attributeValue, o.BaseDN)
+		}
+		return o.buildDNQuery(attributeValue, attributes), nil
+
+	} else {
+		return o.buildAttributeQuery(attributeValue, attributes), nil
+	}
+}
+
+// buildDNQuery builds the query that finds an LDAP entry with the given DN
+// this is done by setting the DN to be the base DN for the search and setting the search scope
+// to only consider the base object found
+func (o *LDAPQueryOnAttribute) buildDNQuery(dn string, attributes []string) *ldap.SearchRequest {
+	var controls []ldap.Control
+	if o.PageSize > 0 {
+		controls = append(controls, ldap.NewControlPaging(uint32(o.PageSize)))
+	}
+	return ldap.NewSearchRequest(
+		dn,
+		ldap.ScopeBaseObject, // override original
+		int(o.DerefAliases),
+		0, // allowed return size - indicates no limit
+		o.TimeLimit,
+		false, // not types only
+		"(objectClass=*)", // filter that returns all values
+		attributes,
+		controls,
+	)
+}
+
+// buildAttributeQuery builds the query containing a filter that conjoins the common filter given
+// in the configuration with the specific attribute filter for which the attribute value is given
+func (o *LDAPQueryOnAttribute) buildAttributeQuery(attributeValue string,
+	attributes []string) *ldap.SearchRequest {
+	specificFilter := fmt.Sprintf("%s=%s",
+		ldap.EscapeFilter(o.QueryAttribute),
+		ldap.EscapeFilter(attributeValue))
+
+	filter := fmt.Sprintf("(&(%s)(%s))", o.Filter, specificFilter)
+
+	var controls []ldap.Control
+	if o.PageSize > 0 {
+		controls = append(controls, ldap.NewControlPaging(uint32(o.PageSize)))
+	}
+
+	return ldap.NewSearchRequest(
+		o.BaseDN,
+		int(o.Scope),
+		int(o.DerefAliases),
+		0, // allowed return size - indicates no limit
+		o.TimeLimit,
+		false, // not types only
+		filter,
+		attributes,
+		controls,
+	)
+}
+
+// QueryForUniqueEntry queries for an LDAP entry with the given searchRequest. The query is expected
+// to return one unique result. If this is not the case, errors are raised
+func QueryForUniqueEntry(clientConfig ldapclient.Config, query *ldap.SearchRequest) (*ldap.Entry, error) {
+	result, err := QueryForEntries(clientConfig, query)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(result) == 0 {
+		return nil, NewEntryNotFoundError(query.BaseDN, query.Filter)
+	}
+
+	if len(result) > 1 {
+		if query.Scope == ldap.ScopeBaseObject {
+			return nil, fmt.Errorf("multiple entries found matching dn=%q:\n%s",
+				query.BaseDN, formatResult(result))
+		} else {
+			return nil, fmt.Errorf("multiple entries found matching filter %s:\n%s",
+				query.Filter, formatResult(result))
+		}
+	}
+
+	entry := result[0]
+	klog.V(4).Infof("found dn=%q for %s", entry.DN, query.Filter)
+	return entry, nil
+}
+
+// formatResult pretty-prints up to the first ten DNs in the slice of entries
+func formatResult(results []*ldap.Entry) string {
+	var names []string
+	for _, entry := range results {
+		names = append(names, entry.DN)
+	}
+	// guard the slice bound so fewer than ten entries do not panic
+	if len(names) > 10 {
+		names = names[:10]
+	}
+	return "\t" + strings.Join(names, "\n\t")
+}
+
+// QueryForEntries queries for LDAP with the given searchRequest
+func QueryForEntries(clientConfig ldapclient.Config, query *ldap.SearchRequest) ([]*ldap.Entry, error) {
+	connection, err := clientConfig.Connect()
+	if err != nil {
+		return nil, fmt.Errorf("could not connect to the LDAP server: %v", err)
+	}
+	defer connection.Close()
+
+	if bindDN, bindPassword := clientConfig.GetBindCredentials(); len(bindDN) > 0 {
+		if err := connection.Bind(bindDN, bindPassword); err != nil {
+			return nil, fmt.Errorf("could not bind to the LDAP server: %v", err)
+		}
+	}
+
+	var searchResult *ldap.SearchResult
+	control := ldap.FindControl(query.Controls, ldap.ControlTypePaging)
+	if control == nil {
+		klog.V(4).Infof("searching LDAP server with config %v with dn=%q and scope %v for %s requesting %v", clientConfig, query.BaseDN, query.Scope, query.Filter, query.Attributes)
+		searchResult, err = connection.Search(query)
+	} else if pagingControl, ok := control.(*ldap.ControlPaging); ok {
+		klog.V(4).Infof("searching LDAP server with config %v with dn=%q and scope %v for %s requesting %v with pageSize=%d", clientConfig, query.BaseDN, query.Scope, query.Filter, query.Attributes, pagingControl.PagingSize)
+		searchResult, err = connection.SearchWithPaging(query, pagingControl.PagingSize)
+	} else {
+		err = fmt.Errorf("invalid paging control type: %v", control)
+	}
+
+	if err != nil {
+		if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) {
+			return nil, NewNoSuchObjectError(query.BaseDN)
+		}
+		return nil, err
+	}
+
+	for _, entry := range searchResult.Entries {
+		klog.V(4).Infof("found dn=%q ", entry.DN)
+	}
+	return searchResult.Entries, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go
new file mode 100644
index 00000000000..ea18fa28d63
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go
@@ -0,0 +1,320 @@
+package ldapquery
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/openshift/library-go/pkg/security/ldaptestclient"
+	"github.com/openshift/library-go/pkg/security/ldaputil"
+	"gopkg.in/ldap.v2"
+)
+
+const (
+	DefaultBaseDN         string                = "dc=example,dc=com"
+	DefaultScope          ldaputil.Scope        = ldaputil.ScopeWholeSubtree
+	DefaultDerefAliases   ldaputil.DerefAliases = ldaputil.DerefAliasesAlways
+	DefaultSizeLimit      int                   = 0
+	DefaultTimeLimit      int                   = 0
+	DefaultTypesOnly      bool                  = false
+	DefaultFilter         string                = 
"objectClass=groupOfNames" + DefaultQueryAttribute string = "uid" +) + +var DefaultAttributes = []string{"dn", "cn", "uid"} +var DefaultControls []ldap.Control + +func TestNewSearchRequest(t *testing.T) { + var testCases = []struct { + name string + options LDAPQueryOnAttribute + attributeValue string + attributes []string + expectedRequest *ldap.SearchRequest + expectedError bool + }{ + { + name: "attribute query no attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: DefaultQueryAttribute, + }, + + attributeValue: "bar", + attributes: DefaultAttributes, + expectedRequest: &ldap.SearchRequest{ + BaseDN: DefaultBaseDN, + Scope: int(DefaultScope), + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: fmt.Sprintf("(&(%s)(%s=%s))", DefaultFilter, DefaultQueryAttribute, "bar"), + Attributes: DefaultAttributes, + Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "attribute query with additional attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: DefaultQueryAttribute, + }, + attributeValue: "bar", + attributes: append(DefaultAttributes, []string{"email", "phone"}...), + expectedRequest: &ldap.SearchRequest{ + BaseDN: DefaultBaseDN, + Scope: int(DefaultScope), + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: fmt.Sprintf("(&(%s)(%s=%s))", DefaultFilter, DefaultQueryAttribute, "bar"), + Attributes: append(DefaultAttributes, []string{"email", "phone"}...), + Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "valid dn query no attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=john,o=users,dc=example,dc=com", + attributes: DefaultAttributes, + expectedRequest: &ldap.SearchRequest{ + BaseDN: "uid=john,o=users,dc=example,dc=com", + Scope: ldap.ScopeBaseObject, + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: "(objectClass=*)", + Attributes: DefaultAttributes, + Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "valid dn query with additional attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=john,o=users,dc=example,dc=com", + attributes: append(DefaultAttributes, []string{"email", "phone"}...), + expectedRequest: &ldap.SearchRequest{ + BaseDN: "uid=john,o=users,dc=example,dc=com", + Scope: ldap.ScopeBaseObject, + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: "(objectClass=*)", + Attributes: append(DefaultAttributes, []string{"email", "phone"}...), + Controls: DefaultControls, + 
}, + expectedError: false, + }, + { + name: "invalid dn query out of bounds", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=john,o=users,dc=other,dc=com", + attributes: DefaultAttributes, + expectedRequest: nil, + expectedError: true, + }, + { + name: "invalid dn query invalid dn", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=,o=users,dc=other,dc=com", + attributes: DefaultAttributes, + expectedRequest: nil, + expectedError: true, + }, + { + name: "attribute query no attributes with paging", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + PageSize: 10, + }, + QueryAttribute: DefaultQueryAttribute, + }, + + attributeValue: "bar", + attributes: DefaultAttributes, + expectedRequest: &ldap.SearchRequest{ + BaseDN: DefaultBaseDN, + Scope: int(DefaultScope), + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: fmt.Sprintf("(&(%s)(%s=%s))", DefaultFilter, DefaultQueryAttribute, "bar"), + Attributes: DefaultAttributes, + Controls: []ldap.Control{ldap.NewControlPaging(10)}, + }, + expectedError: false, + }, + } + + for _, testCase := range testCases { + request, err := testCase.options.NewSearchRequest( + testCase.attributeValue, + testCase.attributes) + + switch { + case err != nil && !testCase.expectedError: + t.Errorf("%s: expected no error but got: %v", testCase.name, err) + case err == nil && testCase.expectedError: + t.Errorf("%s: expected an error but got none", testCase.name) + } + + if !reflect.DeepEqual(testCase.expectedRequest, request) { + t.Errorf("%s: did not correctly create search request:\n\texpected:\n%#v\n\tgot:\n%#v", + testCase.name, testCase.expectedRequest, request) + } + } +} + +// TestErrNoSuchObject tests that our LDAP search correctly wraps the LDAP server error +func TestErrNoSuchObject(t *testing.T) { + var testCases = []struct { + name string + searchRequest *ldap.SearchRequest + expectedError error + }{ + { + name: "valid search", + searchRequest: &ldap.SearchRequest{ + BaseDN: "uid=john,o=users,dc=example,dc=com", + }, + expectedError: nil, + }, + { + name: "invalid search", + searchRequest: &ldap.SearchRequest{ + BaseDN: "ou=groups,dc=example,dc=com", + }, + expectedError: &errNoSuchObject{baseDN: "ou=groups,dc=example,dc=com"}, + }, + } + for _, testCase := range testCases { + testClient := ldaptestclient.NewMatchingSearchErrorClient(ldaptestclient.New(), + "ou=groups,dc=example,dc=com", + ldap.NewError(ldap.LDAPResultNoSuchObject, errors.New("")), + ) + testConfig := ldaptestclient.NewConfig(testClient) + if _, err := QueryForEntries(testConfig, testCase.searchRequest); !reflect.DeepEqual(err, testCase.expectedError) { + t.Errorf("%s: error did not match:\n\texpected:\n\t%v\n\tgot:\n\t%v", testCase.name, testCase.expectedError, err) + } + } +} + +// TestErrEntryNotFound checks that we wrap a zero-length list of results correctly if we search for a unique entry +func TestErrEntryNotFound(t *testing.T) { + 
testConfig := ldaptestclient.NewConfig(ldaptestclient.New())
+	testSearchRequest := &ldap.SearchRequest{
+		BaseDN:       "dc=example,dc=com",
+		Scope:        ldap.ScopeWholeSubtree,
+		DerefAliases: int(DefaultDerefAliases),
+		SizeLimit:    DefaultSizeLimit,
+		TimeLimit:    DefaultTimeLimit,
+		TypesOnly:    DefaultTypesOnly,
+		Filter:       "(objectClass=*)",
+		Attributes:   append(DefaultAttributes),
+		Controls:     DefaultControls,
+	}
+
+	expectedErr := &errEntryNotFound{baseDN: "dc=example,dc=com", filter: "(objectClass=*)"}
+
+	// test that a unique search errors on no result
+	if _, err := QueryForUniqueEntry(testConfig, testSearchRequest); !reflect.DeepEqual(err, expectedErr) {
+		t.Errorf("query for unique entry did not get correct error:\n\texpected:\n\t%v\n\tgot:\n\t%v", expectedErr, err)
+	}
+
+	// test that a non-unique search doesn't error
+	if _, err := QueryForEntries(testConfig, testSearchRequest); !reflect.DeepEqual(err, nil) {
+		t.Errorf("query for entries did not get correct error:\n\texpected:\n\t%v\n\tgot:\n\t%v", nil, err)
+	}
+}
+
+func TestQueryWithPaging(t *testing.T) {
+	expectedResult := &ldap.SearchResult{
+		Entries: []*ldap.Entry{ldap.NewEntry("cn=paging,ou=paging,dc=paging,dc=com", map[string][]string{"paging": {"true"}})},
+	}
+
+	testConfig := ldaptestclient.NewConfig(ldaptestclient.NewPagingOnlyClient(ldaptestclient.New(),
+		expectedResult,
+	))
+	testSearchRequest := &ldap.SearchRequest{
+		BaseDN:       "dc=example,dc=com",
+		Scope:        ldap.ScopeWholeSubtree,
+		DerefAliases: int(DefaultDerefAliases),
+		SizeLimit:    DefaultSizeLimit,
+		TimeLimit:    DefaultTimeLimit,
+		TypesOnly:    DefaultTypesOnly,
+		Filter:       "(objectClass=*)",
+		Attributes:   append(DefaultAttributes),
+		Controls:     []ldap.Control{ldap.NewControlPaging(5)},
+	}
+
+	// test that a search request with paging controls gets correctly routed to the SearchWithPaging call
+	response, err := QueryForEntries(testConfig, testSearchRequest)
+	if err != nil {
+		t.Errorf("query with paging control should not create error, but got %v", err)
+	}
+	if !reflect.DeepEqual(expectedResult.Entries, response) {
+		t.Errorf("query with paging did not return correct response: expected %v, got %v", expectedResult.Entries, response)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go
new file mode 100644
index 00000000000..a321df70a1d
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go
@@ -0,0 +1,31 @@
+package ldapquery
+
+// SerializeableLDAPQuery is the user-facing, string-based form of an LDAPQuery
+type SerializeableLDAPQuery struct {
+	// The DN of the branch of the directory where all searches should start from
+	BaseDN string
+
+	// The (optional) scope of the search. Can be:
+	//   base: only the base object,
+	//   one:  all objects on the base level,
+	//   sub:  the entire subtree
+	// Defaults to the entire subtree if not set
+	Scope string
+
+	// The (optional) behavior of the search with regard to aliases. Can be:
+	//   never:  never dereference aliases,
+	//   search: only dereference in searching,
+	//   base:   only dereference in finding the base object,
+	//   always: always dereference
+	// Defaults to always dereferencing if not set
+	DerefAliases string
+
+	// TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding
+	// before the wait for a response is given up. 
If this is 0, no client-side limit is imposed
+	TimeLimit int
+
+	// Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN
+	Filter string
+
+	// PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done.
+	PageSize int
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go
new file mode 100644
index 00000000000..46bd5fdc0ad
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go
@@ -0,0 +1,170 @@
+package ldaptestclient
+
+import (
+	"crypto/tls"
+	"time"
+
+	"gopkg.in/ldap.v2"
+)
+
+// Fake is a mock client for an LDAP server
+// The following methods define safe defaults for the return values. In order to adapt this test client
+// for a specific test, anonymously include it and override the method being tested. In the overridden
+// method, if you are not covering all method calls with your override, defer to the parent for handling.
+type Fake struct {
+	SimpleBindResponse     *ldap.SimpleBindResult
+	PasswordModifyResponse *ldap.PasswordModifyResult
+	SearchResponse         *ldap.SearchResult
+}
+
+var _ ldap.Client = &Fake{}
+
+// New returns a new test client with safe default return values
+func New() *Fake {
+	return &Fake{
+		SimpleBindResponse: &ldap.SimpleBindResult{
+			Controls: []ldap.Control{},
+		},
+		PasswordModifyResponse: &ldap.PasswordModifyResult{
+			GeneratedPassword: "",
+		},
+		SearchResponse: &ldap.SearchResult{
+			Entries:   []*ldap.Entry{},
+			Referrals: []string{},
+			Controls:  []ldap.Control{},
+		},
+	}
+}
+
+// Start starts the LDAP connection
+func (c *Fake) Start() {
+	return
+}
+
+// StartTLS begins a TLS-wrapped LDAP connection
+func (c *Fake) StartTLS(config *tls.Config) error {
+	return nil
+}
+
+// Close closes an LDAP connection
+func (c *Fake) Close() {
+	return
+}
+
+// Bind binds to the LDAP server with a bind DN and password
+func (c *Fake) Bind(username, password string) error {
+	return nil
+}
+
+// SimpleBind binds to the LDAP server using the Simple Bind mechanism
+func (c *Fake) SimpleBind(simpleBindRequest *ldap.SimpleBindRequest) (*ldap.SimpleBindResult, error) {
+	return c.SimpleBindResponse, nil
+}
+
+// Add forwards an addition request to the LDAP server
+func (c *Fake) Add(addRequest *ldap.AddRequest) error {
+	return nil
+}
+
+// Del forwards a deletion request to the LDAP server
+func (c *Fake) Del(delRequest *ldap.DelRequest) error {
+	return nil
+}
+
+// Modify forwards a modification request to the LDAP server
+func (c *Fake) Modify(modifyRequest *ldap.ModifyRequest) error {
+	return nil
+}
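The Fake's doc comment above describes the intended extension pattern: embed the Fake and override only the call under test. A minimal hypothetical sketch (countingClient is not part of the package):

```go
package ldaptestclient

import "gopkg.in/ldap.v2"

// countingClient is a hypothetical override following the pattern described
// above: embed the Fake, intercept the method under test, and defer to the
// embedded defaults for everything else.
type countingClient struct {
	*Fake
	searches int
}

// Search records the call before deferring to the parent's canned response.
func (c *countingClient) Search(req *ldap.SearchRequest) (*ldap.SearchResult, error) {
	c.searches++
	return c.Fake.Search(req)
}
```

The decorator clients defined further down in this file (MatchingSearchErrClient, DNMappingClient, PagingOnlyClient) follow the same shape, embedding an ldap.Client rather than the Fake directly.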
+// Compare checks an attribute value against the LDAP server; the Fake always reports no match
+func (c *Fake) Compare(dn, attribute, value string) (bool, error) {
+	return false, nil
+}
+
+// PasswordModify forwards a password modify request to the LDAP server
+func (c *Fake) PasswordModify(passwordModifyRequest *ldap.PasswordModifyRequest) (*ldap.PasswordModifyResult, error) {
+	return c.PasswordModifyResponse, nil
+}
+
+// Search forwards a search request to the LDAP server
+func (c *Fake) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
+	return c.SearchResponse, nil
+}
+
+// SearchWithPaging forwards a search request to the LDAP server and pages the response
+func (c *Fake) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error) {
+	return c.SearchResponse, nil
+}
+
+// SetTimeout sets a timeout on the client
+func (c *Fake) SetTimeout(d time.Duration) {
+}
+
+// NewMatchingSearchErrorClient returns a new MatchingSearchError client sitting on top of the parent
+// client. This client returns the given error when a search base DN matches the given base DN, and
+// defers to the parent otherwise.
+func NewMatchingSearchErrorClient(parent ldap.Client, baseDN string, returnErr error) ldap.Client {
+	return &MatchingSearchErrClient{
+		Client:    parent,
+		BaseDN:    baseDN,
+		ReturnErr: returnErr,
+	}
+}
+
+// MatchingSearchErrClient returns the ReturnErr on every Search() where the search base DN matches the given DN
+// or defers the search to the parent client
+type MatchingSearchErrClient struct {
+	ldap.Client
+	BaseDN    string
+	ReturnErr error
+}
+
+func (c *MatchingSearchErrClient) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
+	if searchRequest.BaseDN == c.BaseDN {
+		return nil, c.ReturnErr
+	}
+	return c.Client.Search(searchRequest)
+}
+
+// NewDNMappingClient returns a new DNMappingClient sitting on top of the parent client. This client returns the
+// ldap entries mapped to with this DN in its internal DN map, or defers to the parent if the DN is not mapped.
+func NewDNMappingClient(parent ldap.Client, DNMapping map[string][]*ldap.Entry) ldap.Client {
+	return &DNMappingClient{
+		Client:    parent,
+		DNMapping: DNMapping,
+	}
+}
+
+// DNMappingClient returns the LDAP entry mapped to by the base dn given, or if no mapping happens, defers to the parent
+type DNMappingClient struct {
+	ldap.Client
+	DNMapping map[string][]*ldap.Entry
+}
+
+func (c *DNMappingClient) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) {
+	if entries, exists := c.DNMapping[searchRequest.BaseDN]; exists {
+		return &ldap.SearchResult{Entries: entries}, nil
+	}
+
+	return c.Client.Search(searchRequest)
+}
+
+// NewPagingOnlyClient returns a new PagingOnlyClient sitting on top of the parent client. This client returns the
+// provided search response for any calls to SearchWithPaging, or defers to the parent if the call is not to the
+func NewPagingOnlyClient(parent ldap.Client, response *ldap.SearchResult) ldap.Client { + return &PagingOnlyClient{ + Client: parent, + Response: response, + } +} + +// PagingOnlyClient responds with a canned search result for any calls to SearchWithPaging +type PagingOnlyClient struct { + ldap.Client + Response *ldap.SearchResult +} + +func (c *PagingOnlyClient) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error) { + return c.Response, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go new file mode 100644 index 00000000000..6106de0e9f3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go @@ -0,0 +1,30 @@ +package ldaptestclient + +import ( + "github.com/openshift/library-go/pkg/security/ldapclient" + "gopkg.in/ldap.v2" +) + +// fakeConfig regurgitates internal state in order to conform to Config +type fakeConfig struct { + client ldap.Client +} + +// NewConfig creates a new Config impl that regurgitates the given data +func NewConfig(client ldap.Client) ldapclient.Config { + return &fakeConfig{ + client: client, + } +} + +func (c *fakeConfig) Connect() (ldap.Client, error) { + return c.client, nil +} + +func (c *fakeConfig) GetBindCredentials() (string, string) { + return "", "" +} + +func (c *fakeConfig) Host() string { + return "" +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go new file mode 100644 index 00000000000..16ca72231b4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go @@ -0,0 +1,47 @@ +package ldaputil + +import ( + "encoding/base64" + "strings" + + "gopkg.in/ldap.v2" +) + +// GetAttributeValue finds the first attribute of those given that the LDAP entry has, and +// returns it. GetAttributeValue is able to query the DN as well as Attributes of the LDAP entry. +// If no value is found, the empty string is returned. 
+func GetAttributeValue(entry *ldap.Entry, attributes []string) string { + for _, k := range attributes { + // Ignore empty attributes + if len(k) == 0 { + continue + } + // Special-case DN, since it's not an attribute + if strings.ToLower(k) == "dn" { + return entry.DN + } + // Otherwise get an attribute and return it if present + if v := entry.GetAttributeValue(k); len(v) > 0 { + return v + } + } + return "" +} + +func GetRawAttributeValue(entry *ldap.Entry, attributes []string) string { + for _, k := range attributes { + // Ignore empty attributes + if len(k) == 0 { + continue + } + // Special-case DN, since it's not an attribute + if strings.ToLower(k) == "dn" { + return base64.RawURLEncoding.EncodeToString([]byte(entry.DN)) + } + // Otherwise get an attribute and return it if present + if v := entry.GetRawAttributeValue(k); len(v) > 0 { + return base64.RawURLEncoding.EncodeToString(v) + } + } + return "" +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go new file mode 100644 index 00000000000..c709e39f41b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go @@ -0,0 +1,70 @@ +package ldaputil + +import ( + "testing" + + "gopkg.in/ldap.v2" +) + +func TestGetAttributeValue(t *testing.T) { + testcases := map[string]struct { + Entry *ldap.Entry + Attributes []string + ExpectedValue string + }{ + "empty": { + Attributes: []string{}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "", + }, + + "dn": { + Attributes: []string{"dn"}, + Entry: &ldap.Entry{DN: "foo", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "foo", + }, + "DN": { + Attributes: []string{"DN"}, + Entry: &ldap.Entry{DN: "foo", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "foo", + }, + + "missing": { + Attributes: []string{"foo", "bar", "baz"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "", + }, + + "present": { + Attributes: []string{"foo"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{ + {Name: "foo", Values: []string{"fooValue"}}, + }}, + ExpectedValue: "fooValue", + }, + "first of multi-value attribute": { + Attributes: []string{"foo"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{ + {Name: "foo", Values: []string{"fooValue", "fooValue2"}}, + }}, + ExpectedValue: "fooValue", + }, + "first present attribute": { + Attributes: []string{"foo", "bar", "baz"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{ + {Name: "foo", Values: []string{""}}, + {Name: "bar", Values: []string{"barValue"}}, + {Name: "baz", Values: []string{"bazValue"}}, + }}, + ExpectedValue: "barValue", + }, + } + + for k, tc := range testcases { + v := GetAttributeValue(tc.Entry, tc.Attributes) + if v != tc.ExpectedValue { + t.Errorf("%s: Expected %q, got %q", k, tc.ExpectedValue, v) + } + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go new file mode 100644 index 00000000000..caf64963ac4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go @@ -0,0 +1,247 @@ +package ldaputil + +import ( + "fmt" + "net" + "net/url" + "strings" + + "gopkg.in/ldap.v2" +) + +// Scheme is a valid ldap scheme +type Scheme string + +const ( + SchemeLDAP Scheme = "ldap" + SchemeLDAPS Scheme = "ldaps" +) + +// Scope is a 
valid LDAP search scope
+type Scope int
+
+const (
+	ScopeWholeSubtree Scope = ldap.ScopeWholeSubtree
+	ScopeSingleLevel  Scope = ldap.ScopeSingleLevel
+	ScopeBaseObject   Scope = ldap.ScopeBaseObject
+)
+
+// DerefAliases is a valid LDAP alias dereference parameter
+type DerefAliases int
+
+const (
+	DerefAliasesNever     = ldap.NeverDerefAliases
+	DerefAliasesSearching = ldap.DerefInSearching
+	DerefAliasesFinding   = ldap.DerefFindingBaseObj
+	DerefAliasesAlways    = ldap.DerefAlways
+)
+
+const (
+	defaultLDAPPort  = "389"
+	defaultLDAPSPort = "636"
+
+	defaultHost           = "localhost"
+	defaultQueryAttribute = "uid"
+	defaultFilter         = "(objectClass=*)"
+
+	scopeWholeSubtreeString = "sub"
+	scopeSingleLevelString  = "one"
+	scopeBaseObjectString   = "base"
+
+	criticalExtensionPrefix = "!"
+)
+
+// LDAPURL holds a parsed RFC 2255 URL
+type LDAPURL struct {
+	// Scheme is ldap or ldaps
+	Scheme Scheme
+	// Host is the host:port of the LDAP server
+	Host string
+	// The DN of the branch of the directory where all searches should start from
+	BaseDN string
+	// The attribute to search for
+	QueryAttribute string
+	// The scope of the search. Can be ldap.ScopeWholeSubtree, ldap.ScopeSingleLevel, or ldap.ScopeBaseObject
+	Scope Scope
+	// A valid LDAP search filter (e.g. "(objectClass=*)")
+	Filter string
+}
+
+// ParseURL parses the given ldapURL as an RFC 2255 URL
+// The syntax of the URL is ldap://host:port/basedn?attribute?scope?filter
+func ParseURL(ldapURL string) (LDAPURL, error) {
+	// Must be a valid URL to start
+	parsedURL, err := url.Parse(ldapURL)
+	if err != nil {
+		return LDAPURL{}, err
+	}
+
+	opts := LDAPURL{}
+
+	determinedScheme, err := DetermineLDAPScheme(parsedURL.Scheme)
+	if err != nil {
+		return LDAPURL{}, err
+	}
+	opts.Scheme = determinedScheme
+
+	determinedHost, err := DetermineLDAPHost(parsedURL.Host, opts.Scheme)
+	if err != nil {
+		return LDAPURL{}, err
+	}
+	opts.Host = determinedHost
+
+	// Set base dn (default to "")
+	// url.Parse() already percent-decodes the path
+	opts.BaseDN = strings.TrimLeft(parsedURL.Path, "/")
+
+	attributes, scope, filter, extensions, err := SplitLDAPQuery(parsedURL.RawQuery)
+	if err != nil {
+		return LDAPURL{}, err
+	}
+
+	// Attributes contains comma-separated attributes
+	// Set query attribute to first attribute
+	// Default to uid to match mod_auth_ldap
+	opts.QueryAttribute = strings.Split(attributes, ",")[0]
+	if len(opts.QueryAttribute) == 0 {
+		opts.QueryAttribute = defaultQueryAttribute
+	}
+
+	determinedScope, err := DetermineLDAPScope(scope)
+	if err != nil {
+		return LDAPURL{}, err
+	}
+	opts.Scope = determinedScope
+
+	determinedFilter, err := DetermineLDAPFilter(filter)
+	if err != nil {
+		return LDAPURL{}, err
+	}
+	opts.Filter = determinedFilter
+
+	// Extensions are in "name=value,name2=value2" form
+	// Critical extensions are prefixed with a !
+	// Optional extensions are ignored, per RFC
+	// Fail if there are any critical extensions, since we don't support any
+	if len(extensions) > 0 {
+		for _, extension := range strings.Split(extensions, ",") {
+			exttype := strings.SplitN(extension, "=", 2)[0]
+			if strings.HasPrefix(exttype, criticalExtensionPrefix) {
+				return LDAPURL{}, fmt.Errorf("unsupported critical extension %s", extension)
+			}
+		}
+	}
+
+	return opts, nil
+
+}
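A short sketch of ParseURL on a fully specified URL; the host and DNs are placeholders. Given the defaulting rules described above, the port is filled in from the scheme when it is omitted:

```go
package ldaputil

import "fmt"

// demonstrateParseURL is a hypothetical driver for ParseURL.
func demonstrateParseURL() error {
	u, err := ParseURL("ldaps://ldap.example.com/ou=people,dc=example,dc=com?cn?one?(objectClass=person)")
	if err != nil {
		return err
	}
	// Scheme=ldaps Host=ldap.example.com:636 BaseDN=ou=people,dc=example,dc=com
	// QueryAttribute=cn Scope=one (single level) Filter=(objectClass=person)
	fmt.Printf("%s %s %s %s %d %s\n", u.Scheme, u.Host, u.BaseDN, u.QueryAttribute, u.Scope, u.Filter)
	return nil
}
```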
+// DetermineLDAPScheme determines the LDAP connection scheme. Scheme must be one of "ldap" or "ldaps";
+// any other scheme is rejected with an error
+func DetermineLDAPScheme(scheme string) (Scheme, error) {
+	switch Scheme(scheme) {
+	case SchemeLDAP, SchemeLDAPS:
+		return Scheme(scheme), nil
+	default:
+		return "", fmt.Errorf("invalid scheme %q", scheme)
+	}
+}
+
+// DetermineLDAPHost determines the host and port for the LDAP connection.
+// The default host is localhost; the default port for scheme "ldap" is 389, for "ldaps" is 636
+func DetermineLDAPHost(hostport string, scheme Scheme) (string, error) {
+	if len(hostport) == 0 {
+		hostport = defaultHost
+	}
+	// add port if missing
+	if _, _, err := net.SplitHostPort(hostport); err != nil {
+		switch scheme {
+		case SchemeLDAPS:
+			return net.JoinHostPort(hostport, defaultLDAPSPort), nil
+		case SchemeLDAP:
+			return net.JoinHostPort(hostport, defaultLDAPPort), nil
+		default:
+			return "", fmt.Errorf("no default port for scheme %q", scheme)
+		}
+	}
+	// nothing needed to be done
+	return hostport, nil
+}
+
+// SplitLDAPQuery splits the query in the URL into its constituent parts. All sections are optional.
+// Query syntax is attribute?scope?filter?extensions
+func SplitLDAPQuery(query string) (attributes, scope, filter, extensions string, err error) {
+	parts := strings.Split(query, "?")
+	switch len(parts) {
+	case 4:
+		extensions = parts[3]
+		fallthrough
+	case 3:
+		if v, err := url.QueryUnescape(parts[2]); err != nil {
+			return "", "", "", "", err
+		} else {
+			filter = v
+		}
+		fallthrough
+	case 2:
+		if v, err := url.QueryUnescape(parts[1]); err != nil {
+			return "", "", "", "", err
+		} else {
+			scope = v
+		}
+		fallthrough
+	case 1:
+		if v, err := url.QueryUnescape(parts[0]); err != nil {
+			return "", "", "", "", err
+		} else {
+			attributes = v
+		}
+		return attributes, scope, filter, extensions, nil
+	case 0:
+		return
+	default:
+		err = fmt.Errorf("too many query options %q", query)
+		return "", "", "", "", err
+	}
+}
+
+// DetermineLDAPScope determines the LDAP search scope. Scope is one of "sub", "one", or "base"
+// Default to "sub" to match mod_auth_ldap
+func DetermineLDAPScope(scope string) (Scope, error) {
+	switch scope {
+	case "", scopeWholeSubtreeString:
+		return ScopeWholeSubtree, nil
+	case scopeSingleLevelString:
+		return ScopeSingleLevel, nil
+	case scopeBaseObjectString:
+		return ScopeBaseObject, nil
+	default:
+		return -1, fmt.Errorf("invalid scope %q", scope)
+	}
+}
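A sketch of how the query string of an RFC 2255 URL decomposes; the values are hypothetical. Every section is optional, and an empty section falls through to the documented default:

```go
package ldaputil

import "fmt"

// demonstrateSplit is a hypothetical driver for SplitLDAPQuery and
// DetermineLDAPScope.
func demonstrateSplit() error {
	attributes, scope, filter, extensions, err := SplitLDAPQuery("cn?one?(o=mygroup*)?ext=1")
	if err != nil {
		return err
	}
	fmt.Println(attributes, scope, filter, extensions) // cn one (o=mygroup*) ext=1

	// An empty scope string selects the whole subtree, matching mod_auth_ldap.
	defaulted, err := DetermineLDAPScope("")
	if err != nil {
		return err
	}
	fmt.Println(defaulted == ScopeWholeSubtree) // true
	return nil
}
```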
+// DetermineLDAPFilter determines the LDAP search filter. Filter is a valid LDAP filter
+// Default to "(objectClass=*)" per RFC
+func DetermineLDAPFilter(filter string) (string, error) {
+	if len(filter) == 0 {
+		return defaultFilter, nil
+	}
+	if _, err := ldap.CompileFilter(filter); err != nil {
+		return "", fmt.Errorf("invalid filter: %v", err)
+	}
+	return filter, nil
+}
+
+func DetermineDerefAliasesBehavior(derefAliasesString string) (DerefAliases, error) {
+	mapping := map[string]DerefAliases{
+		"never":  DerefAliasesNever,
+		"search": DerefAliasesSearching,
+		"base":   DerefAliasesFinding,
+		"always": DerefAliasesAlways,
+	}
+	derefAliases, exists := mapping[derefAliasesString]
+	if !exists {
+		return -1, fmt.Errorf("not a valid LDAP alias dereferencing behavior: %s", derefAliasesString)
+	}
+	return derefAliases, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go
new file mode 100644
index 00000000000..29b7842b395
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go
@@ -0,0 +1,103 @@
+package ldaputil
+
+import (
+	"reflect"
+	"testing"
+
+	"gopkg.in/ldap.v2"
+)
+
+func TestParseURL(t *testing.T) {
+	testcases := map[string]struct {
+		URL             string
+		ExpectedLDAPURL LDAPURL
+		ExpectedError   string
+	}{
+		// Defaults
+		"defaults for ldap://": {
+			URL:             "ldap://",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "localhost:389", BaseDN: "", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"},
+		},
+		"defaults for ldaps://": {
+			URL:             "ldaps://",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldaps", Host: "localhost:636", BaseDN: "", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"},
+		},
+
+		// Valid
+		"fully specified": {
+			URL:             "ldap://myhost:123/o=myorg?cn?one?(o=mygroup*)?ext=1",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=myorg", QueryAttribute: "cn", Scope: ldap.ScopeSingleLevel, Filter: "(o=mygroup*)"},
+		},
+		"first attribute used for query": {
+			URL:             "ldap://myhost:123/o=myorg?cn,uid?one?(o=mygroup*)?ext=1",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=myorg", QueryAttribute: "cn", Scope: ldap.ScopeSingleLevel, Filter: "(o=mygroup*)"},
+		},
+
+		// Escaping
+		"percent escaped 1": {
+			URL:             "ldap://myhost:123/o=my%20org?my%20attr?one?(o=my%20group%3f*)?ext=1",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=my org", QueryAttribute: "my attr", Scope: ldap.ScopeSingleLevel, Filter: "(o=my group?*)"},
+		},
+		"percent escaped 2": {
+			URL:             "ldap://myhost:123/o=Babsco,c=US???(four-octet=%5c00%5c00%5c00%5c04)",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=Babsco,c=US", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: `(four-octet=\00\00\00\04)`},
+		},
+		"percent escaped 3": {
+			URL:             "ldap://myhost:123/o=An%20Example%5C2C%20Inc.,c=US",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: `o=An Example\2C Inc.,c=US`, QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"},
+		},
+
+		// Invalid
+		"empty": {
+			URL:           "",
+			ExpectedError: `invalid scheme ""`,
+		},
+		"invalid scheme": {
+			URL:           "http://myhost:123/o=myorg?cn?one?(o=mygroup*)?ext=1",
+			ExpectedError: `invalid scheme "http"`,
+		},
+		"invalid scope": {
+			URL:           "ldap://myhost:123/o=myorg?cn?foo?(o=mygroup*)?ext=1",
+			ExpectedError: `invalid scope "foo"`,
+		},
+		"invalid filter": {
+			URL:           "ldap://myhost:123/o=myorg?cn?one?(mygroup*)?ext=1",
+			ExpectedError: `invalid filter: LDAP Result Code 201 "Filter Compile Error": ldap: error parsing filter`,
+		},
+		"invalid segments": {
+			URL:           "ldap://myhost:123/o=myorg?cn?one?(o=mygroup*)?ext=1?extrasegment",
+			ExpectedError: `too many query options "cn?one?(o=mygroup*)?ext=1?extrasegment"`,
+		},
+
+		// Extension handling
+		"ignored optional extension": {
+			URL:             "ldap:///??sub??e-bindname=cn=Manager%2cdc=example%2cdc=com",
+			ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "localhost:389", BaseDN: "", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"},
+		},
+		"rejected required extension": {
+			URL:           "ldap:///??sub??!e-bindname=cn=Manager%2cdc=example%2cdc=com",
+			ExpectedError: "unsupported critical extension !e-bindname=cn=Manager%2cdc=example%2cdc=com",
+		},
+	}
+
+	for k, tc := range testcases {
+		ldapURL, err := ParseURL(tc.URL)
+		if err != nil {
+			if len(tc.ExpectedError) == 0 {
+				t.Errorf("%s: Unexpected error: %v", k, err)
+			}
+			if err.Error() != tc.ExpectedError {
+				t.Errorf("%s: Expected error %q, got %v", k, tc.ExpectedError, err)
+			}
+			continue
+		}
+		if len(tc.ExpectedError) > 0 {
+			t.Errorf("%s: Expected error %q, got none", k, tc.ExpectedError)
+			continue
+		}
+		if !reflect.DeepEqual(tc.ExpectedLDAPURL, ldapURL) {
+			t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedLDAPURL, ldapURL)
+			continue
+		}
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go b/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go
new file mode 100644
index 00000000000..836a71a5a41
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go
@@ -0,0 +1,125 @@
+package uid
+
+import (
+	"fmt"
+	"strings"
+)
+
+type Block struct {
+	Start uint32
+	End   uint32
+}
+
+var (
+	ErrBlockSlashBadFormat = fmt.Errorf("block not in the format \"<start>/<size>\"")
+	ErrBlockDashBadFormat  = fmt.Errorf("block not in the format \"<start>-<end>\"")
+)
+
+func ParseBlock(in string) (Block, error) {
+	if strings.Contains(in, "/") {
+		var start, size uint32
+		n, err := fmt.Sscanf(in, "%d/%d", &start, &size)
+		if err != nil {
+			return Block{}, err
+		}
+		if n != 2 {
+			return Block{}, ErrBlockSlashBadFormat
+		}
+		return Block{Start: start, End: start + size - 1}, nil
+	}
+
+	var start, end uint32
+	n, err := fmt.Sscanf(in, "%d-%d", &start, &end)
+	if err != nil {
+		return Block{}, err
+	}
+	if n != 2 {
+		return Block{}, ErrBlockDashBadFormat
+	}
+	return Block{Start: start, End: end}, nil
+}
+
+func (b Block) String() string {
+	return fmt.Sprintf("%d/%d", b.Start, b.Size())
+}
+
+func (b Block) RangeString() string {
+	return fmt.Sprintf("%d-%d", b.Start, b.End)
+}
+
+func (b Block) Size() uint32 {
+	return b.End - b.Start + 1
+}
+
+type Range struct {
+	block Block
+	size  uint32
+}
+
+func NewRange(start, end, size uint32) (*Range, error) {
+	if start > end {
+		return nil, fmt.Errorf("start %d must be less than end %d", start, end)
+	}
+	if size == 0 {
+		return nil, fmt.Errorf("block size must be a positive integer")
+	}
+	if (end - start) < size {
+		return nil, fmt.Errorf("block size must be less than or equal to the range")
+	}
+	return &Range{
+		block: Block{start, end},
+		size:  size,
+	}, nil
+}
+
+func ParseRange(in string) (*Range, error) {
+	var start, end, block uint32
+	n, err := fmt.Sscanf(in, "%d-%d/%d", &start, &end, &block)
+	if err != nil {
+		return nil, err
+	}
+	if n != 3 {
+		return nil, fmt.Errorf("range not in the format \"<start>-<end>/<size>\"")
+	}
+	return NewRange(start, end, block)
+}
+
+func (r 
*Range) Size() uint32 { + return r.block.Size() / r.size +} + +func (r *Range) String() string { + return fmt.Sprintf("%s/%d", r.block.RangeString(), r.size) +} + +func (r *Range) BlockAt(offset uint32) (Block, bool) { + if offset > r.Size() { + return Block{}, false + } + start := r.block.Start + offset*r.size + return Block{ + Start: start, + End: start + r.size - 1, + }, true +} + +func (r *Range) Contains(block Block) bool { + ok, _ := r.Offset(block) + return ok +} + +func (r *Range) Offset(block Block) (bool, uint32) { + if block.Start < r.block.Start { + return false, 0 + } + if block.End > r.block.End { + return false, 0 + } + if block.End-block.Start+1 != r.size { + return false, 0 + } + if (block.Start-r.block.Start)%r.size != 0 { + return false, 0 + } + return true, (block.Start - r.block.Start) / r.size +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go b/vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go new file mode 100644 index 00000000000..c8fbbf4ca72 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go @@ -0,0 +1,157 @@ +package uid + +import ( + "strings" + "testing" +) + +func TestParseRange(t *testing.T) { + testCases := map[string]struct { + in string + errFn func(error) bool + r Range + total uint32 + }{ + "identity range": { + in: "1-1/1", + r: Range{ + block: Block{1, 1}, + size: 1, + }, + total: 1, + }, + "simple range": { + in: "1-2/1", + r: Range{ + block: Block{1, 2}, + size: 1, + }, + total: 2, + }, + "wide range": { + in: "10000-999999/1000", + r: Range{ + block: Block{10000, 999999}, + size: 1000, + }, + total: 990, + }, + "overflow uint": { + in: "1000-100000000000000/1", + errFn: func(err error) bool { return strings.Contains(err.Error(), "unsigned integer overflow") }, + }, + "negative range": { + in: "1000-999/1", + errFn: func(err error) bool { return strings.Contains(err.Error(), "must be less than end 999") }, + }, + "zero block size": { + in: "1000-1000/0", + errFn: func(err error) bool { return strings.Contains(err.Error(), "block size must be a positive integer") }, + }, + "large block size": { + in: "1000-1001/3", + errFn: func(err error) bool { return strings.Contains(err.Error(), "must be less than or equal to the range") }, + }, + } + + for s, testCase := range testCases { + r, err := ParseRange(testCase.in) + if testCase.errFn != nil && !testCase.errFn(err) { + t.Errorf("%s: unexpected error: %v", s, err) + continue + } + if err != nil { + continue + } + if r.block.Start != testCase.r.block.Start || r.block.End != testCase.r.block.End || r.size != testCase.r.size { + t.Errorf("%s: unexpected range: %#v", s, r) + } + if r.Size() != testCase.total { + t.Errorf("%s: unexpected total: %d", s, r.Size()) + } + } +} + +func TestBlock(t *testing.T) { + b := Block{Start: 100, End: 109} + if b.String() != "100/10" { + t.Errorf("unexpected block string: %s", b.String()) + } + b, err := ParseBlock("100-109") + if err != nil { + t.Fatal(err) + } + if b.String() != "100/10" { + t.Errorf("unexpected block string: %s", b.String()) + } +} + +func TestOffset(t *testing.T) { + testCases := map[string]struct { + r Range + block Block + contained bool + offset uint32 + }{ + "identity range": { + r: Range{ + block: Block{1, 1}, + size: 1, + }, + block: Block{1, 1}, + contained: true, + }, + "out of identity range": { + r: Range{ + block: Block{1, 1}, + size: 1, + }, + block: Block{2, 2}, + }, + "out of identity range expanded": { + r: Range{ + block: Block{1, 1}, + size: 1, + }, + 
block: Block{2, 3}, + }, + "aligned to offset": { + r: Range{ + block: Block{0, 100}, + size: 10, + }, + block: Block{10, 19}, + contained: true, + offset: 1, + }, + "not aligned": { + r: Range{ + block: Block{0, 100}, + size: 10, + }, + block: Block{11, 20}, + }, + } + + for s, testCase := range testCases { + contained, offset := testCase.r.Offset(testCase.block) + if contained != testCase.contained { + t.Errorf("%s: unexpected contained: %t", s, contained) + continue + } + if offset != testCase.offset { + t.Errorf("%s: unexpected offset: %d", s, offset) + continue + } + if contained { + block, ok := testCase.r.BlockAt(offset) + if !ok { + t.Errorf("%s: should find block", s) + continue + } + if block != testCase.block { + t.Errorf("%s: blocks are not equivalent: %#v", s, block) + } + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go b/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go new file mode 100644 index 00000000000..2f84af74296 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go @@ -0,0 +1,50 @@ +package serviceability + +import ( + "os" + "strings" + + "k8s.io/klog" + + "github.com/sirupsen/logrus" +) + +// InitLogrusFromKlog sets the logrus trace level based on the klog trace level. +func InitLogrusFromKlog() { + switch { + case bool(klog.V(4)): + InitLogrus("DEBUG") + case bool(klog.V(2)): + InitLogrus("INFO") + case bool(klog.V(0)): + InitLogrus("WARN") + } +} + +// InitLogrus initializes logrus by setting a loglevel for it. +func InitLogrus(level string) { + if len(level) == 0 { + return + } + level = strings.ToUpper(level) + switch level { + case "DEBUG": + logrus.SetLevel(logrus.DebugLevel) + case "INFO": + logrus.SetLevel(logrus.InfoLevel) + case "WARN": + logrus.SetLevel(logrus.WarnLevel) + case "ERROR": + logrus.SetLevel(logrus.ErrorLevel) + case "FATAL": + logrus.SetLevel(logrus.FatalLevel) + case "PANIC": + logrus.SetLevel(logrus.PanicLevel) + default: + return + } + + logrus.SetFormatter(&logrus.TextFormatter{}) + logrus.SetOutput(os.Stdout) + +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/panic.go b/vendor/github.com/openshift/library-go/pkg/serviceability/panic.go new file mode 100644 index 00000000000..506298af6d0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/panic.go @@ -0,0 +1,93 @@ +package serviceability + +import ( + "encoding/json" + "strings" + "time" + + "k8s.io/klog" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" +) + +// BehaviorOnPanic is a helper for setting the crash mode of OpenShift when a panic is caught. +// It returns a function that should be the defer handler for the caller. 
+func BehaviorOnPanic(modeString string, productVersion version.Info) func() {
+	modes := []string{}
+	if err := json.Unmarshal([]byte(modeString), &modes); err != nil {
+		return behaviorOnPanic(modeString, productVersion)
+	}
+
+	fns := []func(){}
+
+	for _, mode := range modes {
+		fns = append(fns, behaviorOnPanic(mode, productVersion))
+	}
+
+	return func() {
+		for _, fn := range fns {
+			fn()
+		}
+	}
+}
+
+func behaviorOnPanic(mode string, productVersion version.Info) func() {
+	doNothing := func() {}
+
+	switch {
+	case mode == "crash":
+		klog.Infof("Process will terminate as soon as a panic occurs.")
+		utilruntime.ReallyCrash = true
+		return doNothing
+
+	case strings.HasPrefix(mode, "crash-after-delay:"):
+		delayDurationString := strings.TrimPrefix(mode, "crash-after-delay:")
+		delayDuration, err := time.ParseDuration(delayDurationString)
+		if err != nil {
+			klog.Errorf("Unable to start crash-after-delay. Crashing immediately instead: %v", err)
+			utilruntime.ReallyCrash = true
+			return doNothing
+		}
+		klog.Infof("Process will terminate %v after a panic occurs.", delayDurationString)
+		utilruntime.ReallyCrash = false
+		utilruntime.PanicHandlers = append(utilruntime.PanicHandlers, crashOnDelay(delayDuration, delayDurationString))
+		return doNothing
+
+	case strings.HasPrefix(mode, "sentry:"):
+		url := strings.TrimPrefix(mode, "sentry:")
+		m, err := NewSentryMonitor(url, productVersion)
+		if err != nil {
+			klog.Errorf("Unable to start Sentry for panic tracing: %v", err)
+			return doNothing
+		}
+		klog.Infof("Process will log all panics and errors to Sentry.")
+		utilruntime.ReallyCrash = false
+		utilruntime.PanicHandlers = append(utilruntime.PanicHandlers, m.CapturePanic)
+		utilruntime.ErrorHandlers = append(utilruntime.ErrorHandlers, m.CaptureError)
+		return func() {
+			if r := recover(); r != nil {
+				m.CapturePanicAndWait(r, 2*time.Second)
+				panic(r)
+			}
+		}
+	case len(mode) == 0:
+		// default panic behavior
+		utilruntime.ReallyCrash = false
+		return doNothing
+
+	default:
+		klog.Errorf("Unrecognized panic behavior %q", mode)
+		return doNothing
+	}
+}
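The mode string accepts either a single mode or a JSON list of modes, which behaviorOnPanic interprets one by one. A hypothetical wiring in a main function might look like the sketch below; the Sentry DSN is a placeholder:

```go
package main

import (
	"k8s.io/apimachinery/pkg/version"

	"github.com/openshift/library-go/pkg/serviceability"
)

func main() {
	// Combine two behaviors: report panics to Sentry, then crash ten
	// seconds later so the reporter has time to flush.
	defer serviceability.BehaviorOnPanic(
		`["sentry:https://key@sentry.example.com/1", "crash-after-delay:10s"]`,
		version.Info{GitVersion: "v0.0.0-example"},
	)()

	// ... the rest of the program ...
}
```

Note that the returned function is deferred and invoked, since BehaviorOnPanic returns the defer handler rather than being one itself.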
+func crashOnDelay(delay time.Duration, delayString string) func(interface{}) {
+	return func(in interface{}) {
+		go func() {
+			klog.Errorf("Panic happened. Process will crash in %v.", delayString)
+			time.Sleep(delay)
+			panic(in)
+		}()
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/panic_test.go b/vendor/github.com/openshift/library-go/pkg/serviceability/panic_test.go
new file mode 100644
index 00000000000..9a6214bc772
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/serviceability/panic_test.go
@@ -0,0 +1,24 @@
+package serviceability
+
+import (
+	"testing"
+	"time"
+
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/version"
+)
+
+func TestPanicDelayingDeath(t *testing.T) {
+	BehaviorOnPanic(`["crash-after-delay:10s"]`, version.Info{})
+
+	utilruntime.ReallyCrash = false
+	go func() {
+		defer utilruntime.HandleCrash()
+		panic("not dead yet!")
+	}()
+
+	select {
+	case <-time.After(5 * time.Second):
+		t.Log("beat death!")
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go b/vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go
new file mode 100644
index 00000000000..ded45eb29cc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/serviceability/profiler.go
@@ -0,0 +1,34 @@
+package serviceability
+
+import (
+	"fmt"
+	"net/http"
+	"os"
+	"runtime"
+
+	_ "net/http/pprof" // include the default Go profiler mux
+
+	"k8s.io/klog"
+)
+
+// StartProfiler starts the golang profiler on a port if `web` is specified. It uses the "standard" openshift env vars
+func StartProfiler() {
+	if env("OPENSHIFT_PROFILE", "") == "web" {
+		go func() {
+			runtime.SetBlockProfileRate(1)
+			profilePort := env("OPENSHIFT_PROFILE_PORT", "6060")
+			profileHost := env("OPENSHIFT_PROFILE_HOST", "127.0.0.1")
+			klog.Infof("Starting profiling endpoint at http://%s:%s/debug/pprof/", profileHost, profilePort)
+			klog.Fatal(http.ListenAndServe(fmt.Sprintf("%s:%s", profileHost, profilePort), nil))
+		}()
+	}
+}
+
+// env returns an environment variable or a default value if not specified.
+func env(key string, defaultValue string) string {
+	val := os.Getenv(key)
+	if len(val) == 0 {
+		return defaultValue
+	}
+	return val
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go b/vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go
new file mode 100644
index 00000000000..42968df3afd
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/serviceability/sentry.go
@@ -0,0 +1,62 @@
+package serviceability
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/getsentry/raven-go"
+
+	"k8s.io/apimachinery/pkg/version"
+)
+
+// SentryMonitor encapsulates a Sentry client and set of default tags
+type SentryMonitor struct {
+	client *raven.Client
+	tags   map[string]string
+}
+
+// NewSentryMonitor creates a monitor that can capture panics and errors from OpenShift
+// and Kubernetes and roll them up to a Sentry server. 
+func NewSentryMonitor(url string, version version.Info) (*SentryMonitor, error) { + client, err := raven.NewClient(url, nil) + if err != nil { + return nil, err + } + client.SetRelease(version.GitCommit) + return &SentryMonitor{ + client: client, + }, nil +} + +func (m *SentryMonitor) capturePanic(capture interface{}) chan error { + var packet *raven.Packet + switch rval := capture.(type) { + case error: + packet = raven.NewPacket(rval.Error(), raven.NewException(rval, raven.NewStacktrace(2, 3, nil))) + default: + rvalStr := fmt.Sprint(rval) + packet = raven.NewPacket(rvalStr, raven.NewException(errors.New(rvalStr), raven.NewStacktrace(2, 3, nil))) + } + _, ch := m.client.Capture(packet, m.tags) + return ch +} + +// CapturePanic is used by the Sentry client to capture panics +func (m *SentryMonitor) CapturePanic(capture interface{}) { + m.capturePanic(capture) +} + +// CapturePanicAndWait waits until either the Sentry client captures a panic or +// the provided time expires +func (m *SentryMonitor) CapturePanicAndWait(capture interface{}, until time.Duration) { + select { + case <-m.capturePanic(capture): + case <-time.After(until): + } +} + +// CaptureError is used by the Sentry client to capture errors +func (m *SentryMonitor) CaptureError(err error) { + m.client.CaptureError(err, m.tags) +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go b/vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go new file mode 100644 index 00000000000..5070e74d7f6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/serviceability.go @@ -0,0 +1,62 @@ +package serviceability + +import ( + "os" + "os/signal" + "strings" + "syscall" + + "github.com/pkg/profile" +) + +// Stop is a function to defer in your main call to provide profile info. +type Stop interface { + Stop() +} + +type stopper struct{} + +func (stopper) Stop() {} + +// Profile returns an interface to defer for a profile: `defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop()` is common. +// Suffixing the mode with `-tmp` will have the profiler write the run to a temporary directory with a unique name, which +// is useful when running the same command multiple times. +func Profile(mode string) Stop { + path := "." + if strings.HasSuffix(mode, "-tmp") { + mode = strings.TrimSuffix(mode, "-tmp") + path = "" + } + var stop Stop + switch mode { + case "mem": + stop = profileOnExit(profile.Start(profile.MemProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "cpu": + stop = profileOnExit(profile.Start(profile.CPUProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "block": + stop = profileOnExit(profile.Start(profile.BlockProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "mutex": + stop = profileOnExit(profile.Start(profile.MutexProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + case "trace": + stop = profileOnExit(profile.Start(profile.TraceProfile, profile.ProfilePath(path), profile.NoShutdownHook, profile.Quiet)) + default: + stop = stopper{} + } + return stop +} + +func profileOnExit(s Stop) Stop { + go func() { + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + <-c + // Programs with more sophisticated signal handling + // should ensure the Stop() function returned from + // Start() is called during shutdown. 
+ // See http://godoc.org/github.com/pkg/profile
+ s.Stop()
+
+ os.Exit(1)
+ }()
+ return s
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/template/OWNERS b/vendor/github.com/openshift/library-go/pkg/template/OWNERS
new file mode 100644
index 00000000000..29a933a5fa8
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/template/OWNERS
@@ -0,0 +1,11 @@
+reviewers:
+ - smarterclayton
+ - mfojtik
+ - bparees
+ - soltysh
+ - adambkaplan
+approvers:
+ - mfojtik
+ - bparees
+ - soltysh
+ - adambkaplan
diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go b/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go
new file mode 100644
index 00000000000..ee5f5906384
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go
@@ -0,0 +1,3 @@
+// Package generator defines the GeneratorInterface interface and implements
+// some random value generators.
+package generator
diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go
new file mode 100644
index 00000000000..4276a3b0d52
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go
@@ -0,0 +1,3 @@
+// Package examples demonstrates possible implementations of some
+// random value generators.
+package examples
diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go
new file mode 100644
index 00000000000..ad61fb8bdd7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go
@@ -0,0 +1,46 @@
+package examples
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "regexp"
+ "strings"
+)
+
+// RemoteValueGenerator implements GeneratorInterface. It fetches a random value
+// from an external URL endpoint based on the "[GET:<url>]" input expression.
+//
+// Example:
+// - "[GET:http://api.example.com/generateRandomValue]"
+type RemoteValueGenerator struct {
+}
+
+var remoteExp = regexp.MustCompile(`\[GET\:(http(s)?:\/\/(.+))\]`)
+
+// NewRemoteValueGenerator creates a new RemoteValueGenerator.
+func NewRemoteValueGenerator() RemoteValueGenerator {
+ return RemoteValueGenerator{}
+}
+
+// GenerateValue fetches a random value from an external URL. The input
+// expression must be of the form "[GET:<url>]".
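+//
+// For example (illustrative; the endpoint is the placeholder used above):
+//
+//	gen := NewRemoteValueGenerator()
+//	value, err := gen.GenerateValue("[GET:http://api.example.com/generateRandomValue]")
+//
+// The URL between "[GET:" and "]" is fetched and the trimmed response body is
+// substituted back into the expression.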
+func (g RemoteValueGenerator) GenerateValue(expression string) (interface{}, error) { + matches := remoteExp.FindAllStringIndex(expression, -1) + if len(matches) < 1 { + return expression, fmt.Errorf("no matches found.") + } + for _, r := range matches { + response, err := http.Get(expression[5 : len(expression)-1]) + if err != nil { + return "", err + } + defer response.Body.Close() + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return "", err + } + expression = strings.Replace(expression, expression[r[0]:r[1]], strings.TrimSpace(string(body)), 1) + } + return expression, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go new file mode 100644 index 00000000000..9106ddc0d15 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go @@ -0,0 +1,36 @@ +package examples + +import ( + "fmt" + "net" + "net/http" + "testing" +) + +func TestRemoteValueGenerator(t *testing.T) { + generator := NewRemoteValueGenerator() + + _, err := generator.GenerateValue("[GET:http://api.example.com/new]") + if err == nil { + t.Errorf("Expected error while fetching non-existent remote.") + } +} + +func TestFakeRemoteValueGenerator(t *testing.T) { + // Run the fake remote server + http.HandleFunc("/v1/value/generate", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "NewRandomString") + }) + listener, _ := net.Listen("tcp", ":12345") + go http.Serve(listener, nil) + + generator := NewRemoteValueGenerator() + + value, err := generator.GenerateValue("[GET:http://127.0.0.1:12345/v1/value/generate]") + if err != nil { + t.Errorf(err.Error()) + } + if value != "NewRandomString" { + t.Errorf("Failed to fetch remote value using GET.") + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go new file mode 100644 index 00000000000..03579a64b7b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go @@ -0,0 +1,160 @@ +package generator + +import ( + "fmt" + "math/rand" + "regexp" + "strconv" + "strings" +) + +// ExpressionValueGenerator implements Generator interface. It generates +// random string based on the input expression. The input expression is +// a string, which may contain "[a-zA-Z0-9]{length}" constructs, +// defining range and length of the result random characters. +// +// Examples: +// +// from | value +// ----------------------------- +// "test[0-9]{1}x" | "test7x" +// "[0-1]{8}" | "01001100" +// "0x[A-F0-9]{4}" | "0xB3AF" +// "[a-zA-Z0-9]{8}" | "hW4yQU5i" +// +// TODO: Support more regexp constructs. +type ExpressionValueGenerator struct { + seed *rand.Rand +} + +const ( + Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + Numerals = "0123456789" + Symbols = "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:`" + ASCII = Alphabet + Numerals + Symbols +) + +var ( + rangeExp = regexp.MustCompile(`([\\]?[a-zA-Z0-9]\-?[a-zA-Z0-9]?)`) + generatorsExp = regexp.MustCompile(`\[([a-zA-Z0-9\-\\]+)\](\{([0-9]+)\})`) + expressionExp = regexp.MustCompile(`\[(\\w|\\d|\\a|\\A)|([a-zA-Z0-9]\-[a-zA-Z0-9])+\]`) +) + +// NewExpressionValueGenerator creates new ExpressionValueGenerator. 
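+// The tests in this package construct it deterministically, e.g.:
+//
+//	gen := NewExpressionValueGenerator(rand.New(rand.NewSource(1337)))
+//	value, err := gen.GenerateValue("admin[0-9]{2}[A-Z]{2}")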
+func NewExpressionValueGenerator(seed *rand.Rand) ExpressionValueGenerator { + return ExpressionValueGenerator{seed: seed} +} + +// GenerateValue generates random string based on the input expression. +// The input expression is a pseudo-regex formatted string. See +// ExpressionValueGenerator for more details. +func (g ExpressionValueGenerator) GenerateValue(expression string) (interface{}, error) { + for { + r := generatorsExp.FindStringIndex(expression) + if r == nil { + break + } + ranges, length, err := rangesAndLength(expression[r[0]:r[1]]) + if err != nil { + return "", err + } + err = replaceWithGenerated( + &expression, + expression[r[0]:r[1]], + findExpressionPos(ranges), + length, + g.seed, + ) + if err != nil { + return "", err + } + } + return expression, nil +} + +// alphabetSlice produces a string slice that contains all characters within +// a specified range. +func alphabetSlice(from, to byte) (string, error) { + leftPos := strings.Index(ASCII, string(from)) + rightPos := strings.LastIndex(ASCII, string(to)) + if leftPos > rightPos { + return "", fmt.Errorf("invalid range specified: %s-%s", string(from), string(to)) + } + return ASCII[leftPos:rightPos], nil +} + +// replaceWithGenerated replaces all occurrences of the given expression +// in the string with random characters of the specified range and length. +func replaceWithGenerated(s *string, expression string, ranges [][]byte, length int, seed *rand.Rand) error { + var alphabet string + for _, r := range ranges { + switch string(r[0]) + string(r[1]) { + case `\w`: + alphabet += Alphabet + Numerals + "_" + case `\d`: + alphabet += Numerals + case `\a`: + alphabet += Alphabet + Numerals + case `\A`: + alphabet += Symbols + default: + slice, err := alphabetSlice(r[0], r[1]) + if err != nil { + return err + } + alphabet += slice + } + } + result := make([]byte, length) + alphabet = removeDuplicateChars(alphabet) + for i := 0; i < length; i++ { + result[i] = alphabet[seed.Intn(len(alphabet))] + } + *s = strings.Replace(*s, expression, string(result), 1) + return nil +} + +// removeDuplicateChars removes the duplicate characters from the data slice +func removeDuplicateChars(input string) string { + data := []byte(input) + length := len(data) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if data[i] == data[j] { + data[j] = data[length] + data = data[0:length] + length-- + j-- + } + } + } + return string(data) +} + +// findExpressionPos searches the given string for the valid expressions +// and returns their corresponding indexes. +func findExpressionPos(s string) [][]byte { + matches := rangeExp.FindAllStringIndex(s, -1) + result := make([][]byte, len(matches)) + for i, r := range matches { + result[i] = []byte{s[r[0]], s[r[1]-1]} + } + return result +} + +// rangesAndLength extracts the expression ranges (eg. [A-Z0-9]) and length +// (eg. {3}). This helper function also validates the expression syntax and +// its length (must be within 1..255). +func rangesAndLength(s string) (string, int, error) { + expr := s[0:strings.LastIndex(s, "{")] + if !expressionExp.MatchString(expr) { + return "", 0, fmt.Errorf("malformed expresion syntax: %s", expr) + } + + length, _ := strconv.Atoi(s[strings.LastIndex(s, "{")+1 : len(s)-1]) + // TODO: We do need to set a better limit for the number of generated characters. 
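+ // Only lengths in [1, 255] are accepted for now; "{0}" and "{300}" are both
+ // rejected by the check below.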
+ if length > 0 && length <= 255 { + return expr, length, nil + } + return "", 0, fmt.Errorf("range must be within [1-255] characters (%d)", length) +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go new file mode 100644 index 00000000000..bda53c94c33 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go @@ -0,0 +1,73 @@ +package generator + +import ( + "math/rand" + "testing" +) + +func TestExpressionValueGenerator(t *testing.T) { + var tests = []struct { + Expression string + ExpectedValue string + }{ + {"test[A-Z0-9]{4}template", "testQ3HVtemplate"}, + {"[\\d]{3}", "889"}, + {"[\\w]{20}", "hiG4uRbcUDd5PEJLyHZ7"}, + {"[\\a]{10}", "4U390O49B9"}, + {"[\\A]{10}", ")^&-|_:[><"}, + {"strongPassword[\\w]{3}[\\A]{3}", "strongPasswordhiG-|_"}, + {"admin[0-9]{2}[A-Z]{2}", "admin78YB"}, + {"admin[0-9]{2}test[A-Z]{2}", "admin78testYB"}, + } + + for _, test := range tests { + generator := NewExpressionValueGenerator(rand.New(rand.NewSource(1337))) + value, err := generator.GenerateValue(test.Expression) + if err != nil { + t.Errorf("Failed to generate value from %s due to error: %v", test.Expression, err) + } + if value != test.ExpectedValue { + t.Errorf("Failed to generate expected value from %s\n. Generated: %s\n. Expected: %s\n", test.Expression, value, test.ExpectedValue) + } + } +} + +func TestRemoveDuplicatedCharacters(t *testing.T) { + var tests = []struct { + Expression string + ExpectedValue string + }{ + {"abcdefgh", "abcdefgh"}, + {"abcabc", "abc"}, + {"1111111", "1"}, + {"1234567890", "1234567890"}, + {"test@@", "tes@"}, + } + + for _, test := range tests { + result := removeDuplicateChars(test.Expression) + if result != test.ExpectedValue { + t.Errorf("Expected %q, got %q", test.ExpectedValue, result) + } + } +} + +func TestExpressionValueGeneratorErrors(t *testing.T) { + generator := NewExpressionValueGenerator(rand.New(rand.NewSource(1337))) + + if v, err := generator.GenerateValue("[ABC]{3}"); err == nil { + t.Errorf("Expected [ABC]{3} to produce malformed syntax error (returned: %s)", v) + } + + if v, err := generator.GenerateValue("[Z-A]{3}"); err == nil { + t.Errorf("Expected Invalid range specified error, got %s", v) + } + + if v, err := generator.GenerateValue("[A-Z]{300}"); err == nil { + t.Errorf("Expected Invalid range specified error, got %s", v) + } + + if v, err := generator.GenerateValue("[A-Z]{0}"); err == nil { + t.Errorf("Expected Invalid range specified error, got %s", v) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go b/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go new file mode 100644 index 00000000000..6d3a08b2a92 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go @@ -0,0 +1,7 @@ +package generator + +// Generator is an interface for generating random values +// from an input expression +type Generator interface { + GenerateValue(expression string) (interface{}, error) +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go new file mode 100644 index 00000000000..efe692f572b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go @@ -0,0 +1,120 @@ +package templateprocessing + +import ( + 
"encoding/json" + "fmt" + "reflect" + + "k8s.io/klog" +) + +// visitObjectStrings recursively visits all string fields in the object and calls the +// visitor function on them. The visitor function can be used to modify the +// value of the string fields. +func visitObjectStrings(obj interface{}, visitor func(string) (string, bool)) error { + return visitValue(reflect.ValueOf(obj), visitor) +} + +func visitValue(v reflect.Value, visitor func(string) (string, bool)) error { + // you'll never be able to substitute on a nil. Check the kind first or you'll accidentally + // end up panic-ing + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return nil + } + } + + switch v.Kind() { + + case reflect.Ptr, reflect.Interface: + err := visitValue(v.Elem(), visitor) + if err != nil { + return err + } + case reflect.Slice, reflect.Array: + vt := v.Type().Elem() + for i := 0; i < v.Len(); i++ { + val, err := visitUnsettableValues(vt, v.Index(i), visitor) + if err != nil { + return err + } + v.Index(i).Set(val) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + err := visitValue(v.Field(i), visitor) + if err != nil { + return err + } + } + case reflect.Map: + vt := v.Type().Elem() + for _, oldKey := range v.MapKeys() { + newKey, err := visitUnsettableValues(oldKey.Type(), oldKey, visitor) + if err != nil { + return err + } + + oldValue := v.MapIndex(oldKey) + newValue, err := visitUnsettableValues(vt, oldValue, visitor) + if err != nil { + return err + } + v.SetMapIndex(oldKey, reflect.Value{}) + v.SetMapIndex(newKey, newValue) + } + case reflect.String: + if !v.CanSet() { + return fmt.Errorf("unable to set String value '%v'", v) + } + s, asString := visitor(v.String()) + if !asString { + return fmt.Errorf("attempted to set String field to non-string value '%v'", s) + } + v.SetString(s) + default: + klog.V(5).Infof("Ignoring non-parameterizable field type '%s': %v", v.Kind(), v) + return nil + } + return nil +} + +// visitUnsettableValues creates a copy of the object you want to modify and returns the modified result +func visitUnsettableValues(typeOf reflect.Type, original reflect.Value, visitor func(string) (string, bool)) (reflect.Value, error) { + val := reflect.New(typeOf).Elem() + existing := original + // if the value type is interface, we must resolve it to a concrete value prior to setting it back. + if existing.CanInterface() { + existing = reflect.ValueOf(existing.Interface()) + } + switch existing.Kind() { + case reflect.String: + s, asString := visitor(existing.String()) + + if asString { + val = reflect.ValueOf(s) + } else { + b := []byte(s) + var data interface{} + err := json.Unmarshal(b, &data) + if err != nil { + // the result of substitution may have been an unquoted string value, + // which is an error when decoding in json(only "true", "false", and numeric + // values can be unquoted), so try wrapping the value in quotes so it will be + // properly converted to a string type during decoding. 
+ val = reflect.ValueOf(s) + } else { + val = reflect.ValueOf(data) + } + } + + default: + if existing.IsValid() && existing.Kind() != reflect.Invalid { + val.Set(existing) + } + visitValue(val, visitor) + } + + return val, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go new file mode 100644 index 00000000000..a8ef7d4789c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go @@ -0,0 +1,110 @@ +package templateprocessing + +import ( + "fmt" + "reflect" + "testing" +) + +type sampleInnerStruct struct { + Name string + Number int + List []string + Map map[string]string +} + +type sampleStruct struct { + Name string + Inner sampleInnerStruct + Ptr *sampleInnerStruct + MapInMap map[string]map[string]string + ArrayInArray [][]string + Array []string + ArrayInMap map[string][]interface{} +} + +func TestVisitObjectStringsOnStruct(t *testing.T) { + samples := [][]sampleStruct{ + {{}, {}}, + {{Name: "Foo"}, {Name: "sample-Foo"}}, + {{Ptr: nil}, {Ptr: nil}}, + {{Ptr: &sampleInnerStruct{Name: "foo"}}, {Ptr: &sampleInnerStruct{Name: "sample-foo"}}}, + {{Inner: sampleInnerStruct{Name: "foo"}}, {Inner: sampleInnerStruct{Name: "sample-foo"}}}, + {{Array: []string{"foo", "bar"}}, {Array: []string{"sample-foo", "sample-bar"}}}, + { + { + MapInMap: map[string]map[string]string{ + "foo": {"bar": "test"}, + }, + }, + { + MapInMap: map[string]map[string]string{ + "sample-foo": {"sample-bar": "sample-test"}, + }, + }, + }, + { + {ArrayInArray: [][]string{{"foo", "bar"}}}, + {ArrayInArray: [][]string{{"sample-foo", "sample-bar"}}}, + }, + { + {ArrayInMap: map[string][]interface{}{"key": {"foo", "bar"}}}, + {ArrayInMap: map[string][]interface{}{"sample-key": {"sample-foo", "sample-bar"}}}, + }, + } + for i := range samples { + visitObjectStrings(&samples[i][0], func(in string) (string, bool) { + if len(in) == 0 { + return in, true + } + return fmt.Sprintf("sample-%s", in), true + }) + if !reflect.DeepEqual(samples[i][0], samples[i][1]) { + t.Errorf("[%d] Got:\n%#v\nExpected:\n%#v", i, samples[i][0], samples[i][1]) + } + } +} + +func TestVisitObjectStringsOnMap(t *testing.T) { + samples := [][]map[string]string{ + { + {"foo": "bar"}, + {"sample-foo": "sample-bar"}, + }, + { + {"empty": ""}, + {"sample-empty": "sample-"}, + }, + { + {"": "invalid"}, + {"sample-": "sample-invalid"}, + }, + } + + for i := range samples { + visitObjectStrings(&samples[i][0], func(in string) (string, bool) { + return fmt.Sprintf("sample-%s", in), true + }) + if !reflect.DeepEqual(samples[i][0], samples[i][1]) { + t.Errorf("Got %#v, expected %#v", samples[i][0], samples[i][1]) + } + } +} + +func TestVisitObjectStringsOnArray(t *testing.T) { + samples := [][][]string{ + { + {"foo", "bar"}, + {"sample-foo", "sample-bar"}, + }, + } + + for i := range samples { + visitObjectStrings(&samples[i][0], func(in string) (string, bool) { + return fmt.Sprintf("sample-%s", in), true + }) + if !reflect.DeepEqual(samples[i][0], samples[i][1]) { + t.Errorf("Got %#v, expected %#v", samples[i][0], samples[i][1]) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go new file mode 100644 index 00000000000..497c0e399c7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go @@ -0,0 
+1,295 @@
+package templateprocessing
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+
+ appsv1 "github.com/openshift/api/apps/v1"
+ templatev1 "github.com/openshift/api/template/v1"
+ "github.com/openshift/library-go/pkg/legacyapi/legacygroupification"
+ . "github.com/openshift/library-go/pkg/template/generator"
+)
+
+// match ${KEY}, KEY will be grouped
+var stringParameterExp = regexp.MustCompile(`\$\{([a-zA-Z0-9\_]+?)\}`)
+
+// match ${{KEY}} exact match only, KEY will be grouped
+var nonStringParameterExp = regexp.MustCompile(`^\$\{\{([a-zA-Z0-9\_]+)\}\}$`)
+
+// Processor processes the Template into the List with substituted parameters
+type Processor struct {
+ Generators map[string]Generator
+}
+
+// NewProcessor creates a new Processor and initializes its set of generators.
+func NewProcessor(generators map[string]Generator) *Processor {
+ return &Processor{Generators: generators}
+}
+
+// Process transforms the Template object into a List object. It generates
+// Parameter values using the defined set of generators first, and then it
+// substitutes all Parameter expression occurrences with their corresponding
+// values (currently in the containers' Environment variables only).
+func (p *Processor) Process(template *templatev1.Template) field.ErrorList {
+ templateErrors := field.ErrorList{}
+
+ if errs := p.GenerateParameterValues(template); len(errs) > 0 {
+ return append(templateErrors, errs...)
+ }
+
+ // Place parameters into a map for efficient lookup
+ paramMap := make(map[string]templatev1.Parameter)
+ for _, param := range template.Parameters {
+ paramMap[param.Name] = param
+ }
+
+ // Perform parameter substitution on the template's user message. This can be used to
+ // instruct a user on next steps for the template.
+ template.Message, _ = p.EvaluateParameterSubstitution(paramMap, template.Message)
+
+ // Substitute parameters in ObjectLabels - must be done before the template
+ // objects themselves are iterated.
+ for k, v := range template.ObjectLabels {
+ newk, _ := p.EvaluateParameterSubstitution(paramMap, k)
+ v, _ = p.EvaluateParameterSubstitution(paramMap, v)
+ template.ObjectLabels[newk] = v
+
+ if newk != k {
+ delete(template.ObjectLabels, k)
+ }
+ }
+
+ itemPath := field.NewPath("item")
+ for i, item := range template.Objects {
+ idxPath := itemPath.Index(i)
+ var currObj runtime.Object
+
+ if len(item.Raw) > 0 {
+ // TODO: use runtime.DecodeList when it returns ValidationErrorList
+ decodedObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, item.Raw)
+ if err != nil {
+ templateErrors = append(templateErrors, field.Invalid(idxPath.Child("objects"), item, fmt.Sprintf("unable to handle object: %v", err)))
+ continue
+ }
+ currObj = decodedObj
+ } else {
+ currObj = item.Object.DeepCopyObject()
+ }
+
+ // If an object definition's metadata includes a hardcoded namespace field, the field will be stripped out of
+ // the definition during template instantiation. Namespace fields that contain a ${PARAMETER_REFERENCE}
+ // will be left in place, resolved during parameter substitution, and the object will be created in the
+ // referenced namespace.
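+ // For example (illustrative): a hardcoded "namespace": "production" is
+ // cleared here, while "namespace": "${TARGET_NAMESPACE}" is kept and
+ // resolved with the other parameters.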
+ stripNamespace(currObj) + + newItem, err := p.SubstituteParameters(paramMap, currObj) + if err != nil { + templateErrors = append(templateErrors, field.Invalid(idxPath.Child("parameters"), template.Parameters, err.Error())) + } + + // this changes oapi GVKs to groupified GVKs so they can be submitted to modern, aggregated servers + // It is done after substitution in case someone substitutes a kind. + gvk := currObj.GetObjectKind().GroupVersionKind() + legacygroupification.OAPIToGroupifiedGVK(&gvk) + newItem.GetObjectKind().SetGroupVersionKind(gvk) + + if err := addObjectLabels(newItem, template.ObjectLabels); err != nil { + templateErrors = append(templateErrors, field.Invalid(idxPath.Child("labels"), + template.ObjectLabels, fmt.Sprintf("label could not be applied: %v", err))) + } + template.Objects[i] = runtime.RawExtension{Object: newItem} + } + + return templateErrors +} + +func stripNamespace(obj runtime.Object) { + // Remove namespace from the item unless it contains a ${PARAMETER_REFERENCE} + if itemMeta, err := meta.Accessor(obj); err == nil && len(itemMeta.GetNamespace()) > 0 && !stringParameterExp.MatchString(itemMeta.GetNamespace()) { + itemMeta.SetNamespace("") + return + } + // TODO: allow meta.Accessor to handle runtime.Unstructured + if unstruct, ok := obj.(*unstructured.Unstructured); ok && unstruct.Object != nil { + if obj, ok := unstruct.Object["metadata"]; ok { + if m, ok := obj.(map[string]interface{}); ok { + if ns, ok := m["namespace"]; ok { + if ns, ok := ns.(string); !ok || !stringParameterExp.MatchString(ns) { + m["namespace"] = "" + } + } + } + return + } + if ns, ok := unstruct.Object["namespace"]; ok { + if ns, ok := ns.(string); !ok || !stringParameterExp.MatchString(ns) { + unstruct.Object["namespace"] = "" + return + } + } + } +} + +// GetParameterByName searches for a Parameter in the Template +// based on its name. +func GetParameterByName(t *templatev1.Template, name string) *templatev1.Parameter { + for i, param := range t.Parameters { + if param.Name == name { + return &(t.Parameters[i]) + } + } + return nil +} + +// EvaluateParameterSubstitution replaces escaped parameters in a string with values from the +// provided map. Returns the substituted value (if any substitution applied) and a boolean +// indicating if the resulting value should be treated as a string(true) or a non-string +// value(false) for purposes of json encoding. +func (p *Processor) EvaluateParameterSubstitution(params map[string]templatev1.Parameter, in string) (string, bool) { + out := in + // First check if the value matches the "${{KEY}}" substitution syntax, which + // means replace and drop the quotes because the parameter value is to be used + // as a non-string value. If we hit a match here, we're done because the + // "${{KEY}}" syntax is exact match only, it cannot be used in a value like + // "FOO_${{KEY}}_BAR", no substitution will be performed if it is used in that way. + for _, match := range nonStringParameterExp.FindAllStringSubmatch(in, -1) { + if len(match) > 1 { + if paramValue, found := params[match[1]]; found { + out = strings.Replace(out, match[0], paramValue.Value, 1) + return out, false + } + } + } + + // If we didn't do a non-string substitution above, do normal string substitution + // on the value here if it contains a "${KEY}" reference. 
This substitution does + // allow multiple matches and prefix/postfix, eg "FOO_${KEY1}_${KEY2}_BAR" + for _, match := range stringParameterExp.FindAllStringSubmatch(in, -1) { + if len(match) > 1 { + if paramValue, found := params[match[1]]; found { + out = strings.Replace(out, match[0], paramValue.Value, 1) + } + } + } + return out, true +} + +// SubstituteParameters loops over all values defined in structured +// and unstructured types that are children of item. +// +// Example of Parameter expression: +// - ${PARAMETER_NAME} +// +func (p *Processor) SubstituteParameters(params map[string]templatev1.Parameter, item runtime.Object) (runtime.Object, error) { + visitObjectStrings(item, func(in string) (string, bool) { + return p.EvaluateParameterSubstitution(params, in) + }) + return item, nil +} + +// GenerateParameterValues generates Value for each Parameter of the given +// Template that has Generate field specified where Value is not already +// supplied. +// +// Examples: +// +// from | value +// ----------------------------- +// "test[0-9]{1}x" | "test7x" +// "[0-1]{8}" | "01001100" +// "0x[A-F0-9]{4}" | "0xB3AF" +// "[a-zA-Z0-9]{8}" | "hW4yQU5i" +// If an error occurs, the parameter that caused the error is returned along with the error message. +func (p *Processor) GenerateParameterValues(t *templatev1.Template) field.ErrorList { + var errs field.ErrorList + + for i := range t.Parameters { + param := &t.Parameters[i] + if len(param.Value) > 0 { + continue + } + templatePath := field.NewPath("template").Child("parameters").Index(i) + if param.Generate != "" { + generator, ok := p.Generators[param.Generate] + if !ok { + err := fmt.Errorf("Unknown generator name '%v' for parameter %s", param.Generate, param.Name) + errs = append(errs, field.Invalid(templatePath, param.Generate, err.Error())) + continue + } + if generator == nil { + err := fmt.Errorf("template.parameters[%v]: Invalid '%v' generator for parameter %s", i, param.Generate, param.Name) + errs = append(errs, field.Invalid(templatePath, param, err.Error())) + continue + } + value, err := generator.GenerateValue(param.From) + if err != nil { + errs = append(errs, field.Invalid(templatePath, param, err.Error())) + continue + } + param.Value, ok = value.(string) + if !ok { + err := fmt.Errorf("template.parameters[%v]: Unable to convert the generated value '%#v' to string for parameter %s", i, value, param.Name) + errs = append(errs, field.Invalid(templatePath, param, err.Error())) + continue + } + } + if len(param.Value) == 0 && param.Required { + err := fmt.Errorf("template.parameters[%v]: parameter %s is required and must be specified", i, param.Name) + errs = append(errs, field.Required(templatePath, err.Error())) + } + } + + return errs +} + +// addObjectLabels adds new label(s) to a single runtime.Object, overwriting +// existing labels that have the same key. 
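+// For example, applying labels.Set{"key2": "v3"} to an object already labeled
+// {"key1": "v1", "key2": "v2"} yields {"key1": "v1", "key2": "v3"}; the
+// "overwrites label" case in template_test.go exercises exactly this.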
+func addObjectLabels(obj runtime.Object, labels labels.Set) error { + if labels == nil { + return nil + } + + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + metaLabels := accessor.GetLabels() + if metaLabels == nil { + metaLabels = make(map[string]string) + } + for k, v := range labels { + metaLabels[k] = v + } + accessor.SetLabels(metaLabels) + + switch objType := obj.(type) { + case *appsv1.DeploymentConfig: + if err := addDeploymentConfigNestedLabels(objType, labels); err != nil { + return fmt.Errorf("unable to add nested labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err) + } + } + + return nil +} + +// addDeploymentConfigNestedLabels adds new label(s) to a nested labels of a single DeploymentConfig object +func addDeploymentConfigNestedLabels(obj *appsv1.DeploymentConfig, labels labels.Set) error { + if obj.Spec.Template == nil { + return nil + } + if obj.Spec.Template.Labels == nil { + obj.Spec.Template.Labels = make(map[string]string) + } + for k, v := range labels { + obj.Spec.Template.Labels[k] = v + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go new file mode 100644 index 00000000000..21d2ced92e9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go @@ -0,0 +1,589 @@ +package templateprocessing + +import ( + "fmt" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/validation/field" + + appsv1 "github.com/openshift/api/apps/v1" + templatev1 "github.com/openshift/api/template/v1" + "github.com/openshift/library-go/pkg/template/generator" +) + +var codecFactory = serializer.CodecFactory{} + +func init() { + _, codecFactory = apitesting.SchemeForOrDie(templatev1.Install) +} + +func makeParameter(name, value, generate string, required bool) templatev1.Parameter { + return templatev1.Parameter{ + Name: name, + Value: value, + Generate: generate, + Required: required, + } +} + +type FooGenerator struct { +} + +func (g FooGenerator) GenerateValue(expression string) (interface{}, error) { + return "foo", nil +} + +type ErrorGenerator struct { +} + +func (g ErrorGenerator) GenerateValue(expression string) (interface{}, error) { + return "", fmt.Errorf("error") +} + +type NoStringGenerator struct { +} + +func (g NoStringGenerator) GenerateValue(expression string) (interface{}, error) { + return NoStringGenerator{}, nil +} + +type EmptyGenerator struct { +} + +func (g EmptyGenerator) GenerateValue(expression string) (interface{}, error) { + return "", nil +} + +func TestParameterGenerators(t *testing.T) { + tests := []struct { + parameter templatev1.Parameter + generators map[string]generator.Generator + shouldPass bool + expected templatev1.Parameter + errType field.ErrorType + fieldPath string + }{ + { // Empty generator, should pass + makeParameter("PARAM-pass-empty-gen", "X", "", false), + map[string]generator.Generator{}, + true, + makeParameter("PARAM-pass-empty-gen", "X", "", false), + "", + "", + }, + { // Foo generator, should pass + makeParameter("PARAM-pass-foo-gen", "", 
"foo", false), + map[string]generator.Generator{"foo": FooGenerator{}}, + true, + makeParameter("PARAM-pass-foo-gen", "foo", "", false), + "", + "", + }, + { // Foo generator, should fail + makeParameter("PARAM-fail-foo-gen", "", "foo", false), + map[string]generator.Generator{}, + false, + makeParameter("PARAM-fail-foo-gen", "foo", "", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // No str generator, should fail + makeParameter("PARAM-fail-nostr-gen", "", "foo", false), + map[string]generator.Generator{"foo": NoStringGenerator{}}, + false, + makeParameter("PARAM-fail-nostr-gen", "foo", "", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // Invalid generator, should fail + makeParameter("PARAM-fail-inv-gen", "", "invalid", false), + map[string]generator.Generator{"invalid": nil}, + false, + makeParameter("PARAM-fail-inv-gen", "", "invalid", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // Error generator, should fail + makeParameter("PARAM-fail-err-gen", "", "error", false), + map[string]generator.Generator{"error": ErrorGenerator{}}, + false, + makeParameter("PARAM-fail-err-gen", "", "error", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // Error required parameter, no value, should fail + makeParameter("PARAM-fail-no-val", "", "", true), + map[string]generator.Generator{"error": ErrorGenerator{}}, + false, + makeParameter("PARAM-fail-no-val", "", "", true), + field.ErrorTypeRequired, + "template.parameters[0]", + }, + { // Error required parameter, no value from generator, should fail + makeParameter("PARAM-fail-no-val-from-gen", "", "empty", true), + map[string]generator.Generator{"empty": EmptyGenerator{}}, + false, + makeParameter("PARAM-fail-no-val-from-gen", "", "empty", true), + field.ErrorTypeRequired, + "template.parameters[0]", + }, + } + + for i, test := range tests { + processor := NewProcessor(test.generators) + template := templatev1.Template{Parameters: []templatev1.Parameter{test.parameter}} + errs := processor.GenerateParameterValues(&template) + if errs != nil && test.shouldPass { + t.Errorf("test[%v]: Unexpected error %v", i, errs) + } + if errs == nil && !test.shouldPass { + t.Errorf("test[%v]: Expected error", i) + } + if errs != nil { + if test.errType != errs[0].Type { + t.Errorf("test[%v]: Unexpected error type: Expected: %s, got %s", i, test.errType, errs[0].Type) + } + if test.fieldPath != errs[0].Field { + t.Errorf("test[%v]: Unexpected error type: Expected: %s, got %s", i, test.fieldPath, errs[0].Field) + } + continue + } + actual := template.Parameters[0] + if actual.Value != test.expected.Value { + t.Errorf("test[%v]: Unexpected value: Expected: %#v, got: %#v", i, test.expected.Value, test.parameter.Value) + } + } +} + +func TestProcessValue(t *testing.T) { + var template templatev1.Template + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), []byte(`{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v${VALUE}", + "metadata": { + "labels": { + "i1": "${{INT_1}}", + "invalidjsonmap": "${{INVALID_JSON_MAP}}", + "invalidjsonarray": "${{INVALID_JSON_ARRAY}}", + "key1": "${VALUE}", + "key2": "$${VALUE}", + "quoted_string": "${{STRING_1}}", + "s1_s1": "${STRING_1}_${STRING_1}", + "s1_s2": "${STRING_1}_${STRING_2}", + "untouched": "a${{INT_1}}", + "untouched2": "${{INT_1}}a", + "untouched3": "${{INVALID_PARAMETER}}", + "untouched4": "${{INVALID PARAMETER}}", + "validjsonmap": 
"${{VALID_JSON_MAP}}", + "validjsonarray": "${{VALID_JSON_ARRAY}}" + + } + } + } + ] + }`), &template); err != nil { + t.Fatalf("unexpected error: %v", err) + } + generators := map[string]generator.Generator{ + "expression": generator.NewExpressionValueGenerator(rand.New(rand.NewSource(1337))), + } + processor := NewProcessor(generators) + + // Define custom parameter for the transformation: + addParameter(&template, makeParameter("VALUE", "1", "", false)) + addParameter(&template, makeParameter("STRING_1", "string1", "", false)) + addParameter(&template, makeParameter("STRING_2", "string2", "", false)) + addParameter(&template, makeParameter("INT_1", "1", "", false)) + addParameter(&template, makeParameter("VALID_JSON_MAP", "{\"key\":\"value\"}", "", false)) + addParameter(&template, makeParameter("INVALID_JSON_MAP", "{\"key\":\"value\"", "", false)) + addParameter(&template, makeParameter("VALID_JSON_ARRAY", "[\"key\",\"value\"]", "", false)) + addParameter(&template, makeParameter("INVALID_JSON_ARRAY", "[\"key\":\"value\"", "", false)) + + // Transform the template config into the result config + errs := processor.Process(&template) + if len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + result, err := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &template) + if err != nil { + t.Fatalf("unexpected error during encoding Config: %#v", err) + } + expect := `{"kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null},"objects":[{"apiVersion":"v1","kind":"Service","metadata":{"labels":{"i1":1,"invalidjsonarray":"[\"key\":\"value\"","invalidjsonmap":"{\"key\":\"value\"","key1":"1","key2":"$1","quoted_string":"string1","s1_s1":"string1_string1","s1_s2":"string1_string2","untouched":"a${{INT_1}}","untouched2":"${{INT_1}}a","untouched3":"${{INVALID_PARAMETER}}","untouched4":"${{INVALID PARAMETER}}","validjsonarray":["key","value"],"validjsonmap":{"key":"value"}}}}],"parameters":[{"name":"VALUE","value":"1"},{"name":"STRING_1","value":"string1"},{"name":"STRING_2","value":"string2"},{"name":"INT_1","value":"1"},{"name":"VALID_JSON_MAP","value":"{\"key\":\"value\"}"},{"name":"INVALID_JSON_MAP","value":"{\"key\":\"value\""},{"name":"VALID_JSON_ARRAY","value":"[\"key\",\"value\"]"},{"name":"INVALID_JSON_ARRAY","value":"[\"key\":\"value\""}]}` + stringResult := strings.TrimSpace(string(result)) + if expect != stringResult { + //t.Errorf("unexpected output, expected: \n%s\nGot:\n%s\n", expect, stringResult) + t.Errorf("unexpected output: %s", diff.StringDiff(expect, stringResult)) + } +} + +var trailingWhitespace = regexp.MustCompile(`\n\s*`) + +func TestEvaluateLabels(t *testing.T) { + testCases := map[string]struct { + Input string + Output string + Labels map[string]string + }{ + "no labels": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"} } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key1":"v1","key2":"v2"}} + } + ] + }`, + }, + "one different label": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"} } + } + ] + }`, + Output: `{ + 
"kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key1":"v1","key2":"v2","key3":"v3"}} + } + ], + "labels":{"key3":"v3"} + }`, + Labels: map[string]string{"key3": "v3"}, + }, + "when the root object has labels and metadata": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {}, + "labels": { + "key1": "v1", + "key2": "v2" + } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service", + "labels":{"key1":"v1","key2":"v2"}, + "metadata":{"labels":{"key3":"v3"}} + } + ], + "labels":{"key3":"v3"} + }`, + Labels: map[string]string{"key3": "v3"}, + }, + "overwrites label": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"} } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key1":"v1","key2":"v3"}} + } + ], + "labels":{"key2":"v3"} + }`, + Labels: map[string]string{"key2": "v3"}, + }, + "parameterised labels": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"}} + } + ], + "parameters": [ + { + "name": "KEY", + "value": "key" + }, + { + "name": "VALUE", + "value": "value" + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key":"value","key1":"v1","key2":"v2"}} + } + ], + "parameters":[ + { + "name":"KEY", + "value":"key" + }, + { + "name":"VALUE", + "value":"value" + } + ], + "labels":{"key":"value"} + }`, + Labels: map[string]string{"${KEY}": "${VALUE}"}, + }, + } + + for k, testCase := range testCases { + var template templatev1.Template + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), []byte(testCase.Input), &template); err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + + generators := map[string]generator.Generator{ + "expression": generator.NewExpressionValueGenerator(rand.New(rand.NewSource(1337))), + } + processor := NewProcessor(generators) + + template.ObjectLabels = testCase.Labels + + // Transform the template config into the result config + errs := processor.Process(&template) + if len(errs) > 0 { + t.Errorf("%s: unexpected error: %v", k, errs) + continue + } + result, err := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &template) + if err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + expect := testCase.Output + expect = trailingWhitespace.ReplaceAllString(expect, "") + stringResult := strings.TrimSpace(string(result)) + if expect != stringResult { + t.Errorf("%s: unexpected output: %s", k, diff.StringDiff(expect, stringResult)) + continue + } + } +} + +func TestProcessTemplateParameters(t *testing.T) { + var template, expectedTemplate templatev1.Template + jsonData, _ := ioutil.ReadFile("testdata/guestbook.json") + if err := 
runtime.DecodeInto(codecFactory.UniversalDecoder(), jsonData, &template); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expectedData, _ := ioutil.ReadFile("testdata/guestbook_list.json") + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), expectedData, &expectedTemplate); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + generators := map[string]generator.Generator{ + "expression": generator.NewExpressionValueGenerator(rand.New(rand.NewSource(1337))), + } + processor := NewProcessor(generators) + + // Define custom parameter for the transformation: + addParameter(&template, makeParameter("CUSTOM_PARAM1", "1", "", false)) + + // Transform the template config into the result config + errs := processor.Process(&template) + if len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + result, err := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &template) + if err != nil { + t.Fatalf("unexpected error during encoding Config: %#v", err) + } + exp, _ := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &expectedTemplate) + + if string(result) != string(exp) { + t.Errorf("unexpected output: %s", diff.StringDiff(string(exp), string(result))) + } +} + +// addParameter adds new custom parameter to the Template. It overrides +// the existing parameter, if already defined. +func addParameter(t *templatev1.Template, param templatev1.Parameter) { + if existing := GetParameterByName(t, param.Name); existing != nil { + *existing = param + } else { + t.Parameters = append(t.Parameters, param) + } +} + +func TestAddConfigLabels(t *testing.T) { + var nilLabels map[string]string + + testCases := []struct { + obj runtime.Object + addLabels map[string]string + err bool + expectedLabels map[string]string + }{ + { // [0] Test nil + nil => nil + obj: &corev1.Pod{}, + addLabels: nilLabels, + err: false, + expectedLabels: nilLabels, + }, + { // [1] Test nil + empty labels => empty labels + obj: &corev1.Pod{}, + addLabels: map[string]string{}, + err: false, + expectedLabels: map[string]string{}, + }, + { // [2] Test obj.Labels + nil => obj.Labels + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + }, + addLabels: nilLabels, + err: false, + expectedLabels: map[string]string{"foo": "bar"}, + }, + { // [3] Test obj.Labels + empty labels => obj.Labels + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + }, + addLabels: map[string]string{}, + err: false, + expectedLabels: map[string]string{"foo": "bar"}, + }, + { // [4] Test nil + addLabels => addLabels + obj: &corev1.Pod{}, + addLabels: map[string]string{"foo": "bar"}, + err: false, + expectedLabels: map[string]string{"foo": "bar"}, + }, + { // [5] Test obj.labels + addLabels => expectedLabels + obj: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"baz": ""}}, + }, + addLabels: map[string]string{"foo": "bar"}, + err: false, + expectedLabels: map[string]string{"foo": "bar", "baz": ""}, + }, + { // [6] Test conflicting keys with the same value + obj: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "same value"}}, + }, + addLabels: map[string]string{"foo": "same value"}, + err: false, + expectedLabels: map[string]string{"foo": "same value"}, + }, + { // [7] Test conflicting keys with a different value + obj: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "first value"}}, + }, + addLabels: map[string]string{"foo": 
"second value"}, + err: false, + expectedLabels: map[string]string{"foo": "second value"}, + }, + { // [8] Test conflicting keys with the same value in ReplicationController nested labels + obj: &corev1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "same value"}, + }, + Spec: corev1.ReplicationControllerSpec{ + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + }, + addLabels: map[string]string{"foo": "same value"}, + err: false, + expectedLabels: map[string]string{"foo": "same value"}, + }, + { // [9] Test adding labels to a DeploymentConfig object + obj: &appsv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "first value"}, + }, + Spec: appsv1.DeploymentConfigSpec{ + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "first value"}, + }, + }, + }, + }, + addLabels: map[string]string{"bar": "second value"}, + err: false, + expectedLabels: map[string]string{"foo": "first value", "bar": "second value"}, + }, + } + + for i, test := range testCases { + err := addObjectLabels(test.obj, test.addLabels) + if err != nil && !test.err { + t.Errorf("Unexpected error while setting labels on testCase[%v]: %v.", i, err) + } else if err == nil && test.err { + t.Errorf("Unexpected non-error while setting labels on testCase[%v].", i) + } + + accessor, err := meta.Accessor(test.obj) + if err != nil { + t.Error(err) + } + metaLabels := accessor.GetLabels() + if e, a := test.expectedLabels, metaLabels; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected labels on testCase[%v]. Expected: %#v, got: %#v.", i, e, a) + } + + // must not add any new nested labels + switch objType := test.obj.(type) { + case *corev1.ReplicationController: + if e, a := map[string]string{}, objType.Spec.Template.Labels; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected labels on testCase[%v]. Expected: %#v, got: %#v.", i, e, a) + } + case *appsv1.DeploymentConfig: + if e, a := test.expectedLabels, objType.Spec.Template.Labels; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected labels on testCase[%v]. 
Expected: %#v, got: %#v.", i, e, a) + } + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json new file mode 100644 index 00000000000..146ecc87a22 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json @@ -0,0 +1,305 @@ +{ + "kind": "Template", + "apiVersion": "template.openshift.io/v1", + "metadata": { + "name": "guestbook-example", + "creationTimestamp": null, + "annotations": { + "openshift.io/display-name": "Guestbook Example", + "description": "Example shows how to build a simple multi-tier application using Kubernetes and Docker" + } + }, + "message": "Your admin credentials are ${ADMIN_USERNAME}:${ADMIN_PASSWORD}", + "objects": [ + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "frontend-route", + "creationTimestamp": null + }, + "spec": { + "host": "guestbook.example.com", + "to": { + "kind": "Service", + "name": "frontend-service" + } + }, + "status": {} + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "frontend-service", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 5432, + "targetPort": 5432, + "nodePort": 0 + } + ], + "selector": { + "name": "frontend-service" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "redis-master", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 10000, + "targetPort": 10000, + "nodePort": 0 + } + ], + "selector": { + "name": "redis-master" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${SLAVE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 10001, + "targetPort": 10001, + "nodePort": 0 + } + ], + "selector": { + "name": "${SLAVE_SERVICE_NAME}" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "redis-master", + "creationTimestamp": null, + "labels": { + "name": "redis-master" + } + }, + "spec": { + "containers": [ + { + "name": "master", + "image": "dockerfile/redis", + "ports": [ + { + "containerPort": 6379, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "${REDIS_PASSWORD}" + } + ], + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst", + "serviceAccount": "" + }, + "status": {} + }, + { + "kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "guestbook", + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + } + }, + "spec": { + "replicas": 3, + "selector": { + "name": "frontend-service" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + } + }, + "spec": { + "containers": [ + { + "name": "php-redis", + "image": "brendanburns/php-redis", + "ports": [ + { + "hostPort": 8000, + "containerPort": 80, + 
"protocol": "TCP" + } + ], + "env": [ + { + "name": "ADMIN_USERNAME", + "value": "${ADMIN_USERNAME}" + }, + { + "name": "ADMIN_PASSWORD", + "value": "${ADMIN_PASSWORD}" + }, + { + "name": "REDIS_PASSWORD", + "value": "${REDIS_PASSWORD}" + } + ], + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + }, + { + "kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "${SLAVE_SERVICE_NAME}", + "creationTimestamp": null, + "labels": { + "name": "${SLAVE_SERVICE_NAME}" + } + }, + "spec": { + "replicas": 2, + "selector": { + "name": "${SLAVE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${SLAVE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "slave", + "image": "brendanburns/${SLAVE_SERVICE_NAME}", + "ports": [ + { + "hostPort": 6380, + "containerPort": 6379, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "${REDIS_PASSWORD}" + } + ], + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + } + ], + "parameters": [ + { + "name": "ADMIN_USERNAME", + "description": "Guestbook administrator username", + "generate": "expression", + "from": "admin[A-Z0-9]{3}" + }, + { + "name": "ADMIN_PASSWORD", + "description": "Guestbook administrator password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "REDIS_PASSWORD", + "description": "Redis password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "SLAVE_SERVICE_NAME", + "description": "Slave Service name", + "value": "redis-slave" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json new file mode 100644 index 00000000000..d2bdcd7aacb --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json @@ -0,0 +1,312 @@ +{ + "kind": "Template", + "apiVersion": "template.openshift.io/v1", + "metadata": { + "name": "guestbook-example", + "creationTimestamp": null, + "annotations": { + "openshift.io/display-name": "Guestbook Example", + "description": "Example shows how to build a simple multi-tier application using Kubernetes and Docker" + } + }, + "message": "Your admin credentials are adminQ3H:dwNJiJwW", + "objects": [ + { + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "creationTimestamp": null, + "name": "frontend-route" + }, + "spec": { + "host": "guestbook.example.com", + "to": { + "kind": "Service", + "name": "frontend-service" + } + }, + "status": {} + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": null, + "name": "frontend-service" + }, + "spec": { + "ports": [ + { + "nodePort": 0, + "port": 5432, + "protocol": "TCP", + "targetPort": 5432 + } + ], + "selector": { + "name": 
"frontend-service" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": null, + "name": "redis-master" + }, + "spec": { + "ports": [ + { + "nodePort": 0, + "port": 10000, + "protocol": "TCP", + "targetPort": 10000 + } + ], + "selector": { + "name": "redis-master" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": null, + "name": "redis-slave" + }, + "spec": { + "ports": [ + { + "nodePort": 0, + "port": 10001, + "protocol": "TCP", + "targetPort": 10001 + } + ], + "selector": { + "name": "redis-slave" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + }, + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "redis-master" + }, + "name": "redis-master" + }, + "spec": { + "containers": [ + { + "capabilities": {}, + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "P8vxbV4C" + } + ], + "image": "dockerfile/redis", + "imagePullPolicy": "IfNotPresent", + "name": "master", + "ports": [ + { + "containerPort": 6379, + "protocol": "TCP" + } + ], + "resources": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + }, + "terminationMessagePath": "/dev/termination-log" + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "serviceAccount": "" + }, + "status": {} + }, + { + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + }, + "name": "guestbook" + }, + "spec": { + "replicas": 3, + "selector": { + "name": "frontend-service" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + } + }, + "spec": { + "containers": [ + { + "capabilities": {}, + "env": [ + { + "name": "ADMIN_USERNAME", + "value": "adminQ3H" + }, + { + "name": "ADMIN_PASSWORD", + "value": "dwNJiJwW" + }, + { + "name": "REDIS_PASSWORD", + "value": "P8vxbV4C" + } + ], + "image": "brendanburns/php-redis", + "imagePullPolicy": "IfNotPresent", + "name": "php-redis", + "ports": [ + { + "containerPort": 80, + "hostPort": 8000, + "protocol": "TCP" + } + ], + "resources": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + }, + "terminationMessagePath": "/dev/termination-log" + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + }, + { + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "redis-slave" + }, + "name": "redis-slave" + }, + "spec": { + "replicas": 2, + "selector": { + "name": "redis-slave" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "redis-slave" + } + }, + "spec": { + "containers": [ + { + "capabilities": {}, + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "P8vxbV4C" + } + ], + "image": "brendanburns/redis-slave", + "imagePullPolicy": "IfNotPresent", + "name": "slave", + "ports": [ + { + "containerPort": 6379, + "hostPort": 6380, + "protocol": "TCP" + } + ], + "resources": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + }, + "terminationMessagePath": "/dev/termination-log" + } + ], + "dnsPolicy": "ClusterFirst", + 
"restartPolicy": "Always", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + } + ], + "parameters": [ + { + "name": "ADMIN_USERNAME", + "description": "Guestbook administrator username", + "value": "adminQ3H", + "generate": "expression", + "from": "admin[A-Z0-9]{3}" + }, + { + "name": "ADMIN_PASSWORD", + "description": "Guestbook administrator password", + "value": "dwNJiJwW", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "REDIS_PASSWORD", + "description": "Redis password", + "value": "P8vxbV4C", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "SLAVE_SERVICE_NAME", + "description": "Slave Service name", + "value": "redis-slave" + }, + { + "name": "CUSTOM_PARAM1", + "value": "1" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go new file mode 100644 index 00000000000..03fc79b9701 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go @@ -0,0 +1,59 @@ +package templateprocessingclient + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/dynamic" + + templatev1 "github.com/openshift/api/template/v1" +) + +type DynamicTemplateProcessor interface { + ProcessToList(template *templatev1.Template) (*unstructured.UnstructuredList, error) + ProcessToListFromUnstructured(unstructuredTemplate *unstructured.Unstructured) (*unstructured.UnstructuredList, error) +} + +type dynamicTemplateProcessor struct { + client dynamic.Interface +} + +func NewDynamicTemplateProcessor(client dynamic.Interface) DynamicTemplateProcessor { + return &dynamicTemplateProcessor{client: client} +} + +func (c *dynamicTemplateProcessor) ProcessToList(template *templatev1.Template) (*unstructured.UnstructuredList, error) { + versionedTemplate, err := scheme.ConvertToVersion(template, templatev1.GroupVersion) + if err != nil { + return nil, err + } + unstructuredTemplate, err := runtime.DefaultUnstructuredConverter.ToUnstructured(versionedTemplate) + if err != nil { + return nil, err + } + + return c.ProcessToListFromUnstructured(&unstructured.Unstructured{Object: unstructuredTemplate}) +} + +func (c *dynamicTemplateProcessor) ProcessToListFromUnstructured(unstructuredTemplate *unstructured.Unstructured) (*unstructured.UnstructuredList, error) { + processedTemplate, err := c.client.Resource(templatev1.GroupVersion.WithResource("processedtemplates")). 
+ Namespace("default").Create(unstructuredTemplate, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + // convert the template into something we iterate over as a list + if err := unstructured.SetNestedField(processedTemplate.Object, processedTemplate.Object["objects"], "items"); err != nil { + return nil, err + } + return processedTemplate.ToList() +} + +var ( + scheme = runtime.NewScheme() +) + +func init() { + utilruntime.Must(templatev1.Install(scheme)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go b/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go new file mode 100644 index 00000000000..ec814da7201 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go @@ -0,0 +1,193 @@ +package unidlingclient + +import ( + "fmt" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/scale" + "k8s.io/klog" + + appsv1 "github.com/openshift/api/apps/v1" + unidlingapi "github.com/openshift/api/unidling/v1alpha1" + appsclient "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1" +) + +const legacyGroupName = "" + +// TODO: remove the below functions once we get a way to mark/unmark an object as idled +// via the scale endpoint + +type AnnotationFunc func(currentReplicas int32, annotations map[string]string) + +func NewScaleAnnotater(scales scale.ScalesGetter, mapper meta.RESTMapper, dcs appsclient.DeploymentConfigsGetter, rcs corev1client.ReplicationControllersGetter, changeAnnots AnnotationFunc) *ScaleAnnotater { + return &ScaleAnnotater{ + mapper: mapper, + scales: scales, + dcs: dcs, + rcs: rcs, + ChangeAnnotations: changeAnnots, + } +} + +type ScaleAnnotater struct { + mapper meta.RESTMapper + scales scale.ScalesGetter + dcs appsclient.DeploymentConfigsGetter + rcs corev1client.ReplicationControllersGetter + ChangeAnnotations AnnotationFunc +} + +// ScaleUpdater implements a method "Update" that knows how to update a given object +type ScaleUpdater interface { + Update(*ScaleAnnotater, runtime.Object, *autoscalingv1.Scale) error +} + +// ScaleUpdater implements unidlingutil.ScaleUpdater +type scaleUpdater struct { + encoder runtime.Encoder + namespace string + dcGetter appsclient.DeploymentConfigsGetter + rcGetter corev1client.ReplicationControllersGetter +} + +func NewScaleUpdater(encoder runtime.Encoder, namespace string, dcGetter appsclient.DeploymentConfigsGetter, rcGetter corev1client.ReplicationControllersGetter) ScaleUpdater { + return scaleUpdater{ + encoder: encoder, + namespace: namespace, + dcGetter: dcGetter, + rcGetter: rcGetter, + } +} + +func (s scaleUpdater) Update(annotator *ScaleAnnotater, obj runtime.Object, scale *autoscalingv1.Scale) error { + var ( + err error + patchBytes, originalObj, newObj []byte + ) + + originalObj, err = runtime.Encode(s.encoder, obj) + if err != nil { + return err + } + + switch typedObj := obj.(type) { + case *appsv1.DeploymentConfig: + if typedObj.Annotations == nil { + typedObj.Annotations = make(map[string]string) + } + + annotator.ChangeAnnotations(typedObj.Spec.Replicas, typedObj.Annotations) + typedObj.Spec.Replicas = scale.Spec.Replicas + + newObj, err = 
diff --git a/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go b/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go
new file mode 100644
index 00000000000..ec814da7201
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go
@@ -0,0 +1,193 @@
+package unidlingclient
+
+import (
+	"fmt"
+
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/scale"
+	"k8s.io/klog"
+
+	appsv1 "github.com/openshift/api/apps/v1"
+	unidlingapi "github.com/openshift/api/unidling/v1alpha1"
+	appsclient "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1"
+)
+
+const legacyGroupName = ""
+
+// TODO: remove the below functions once we get a way to mark/unmark an object as idled
+// via the scale endpoint
+
+type AnnotationFunc func(currentReplicas int32, annotations map[string]string)
+
+func NewScaleAnnotater(scales scale.ScalesGetter, mapper meta.RESTMapper, dcs appsclient.DeploymentConfigsGetter, rcs corev1client.ReplicationControllersGetter, changeAnnots AnnotationFunc) *ScaleAnnotater {
+	return &ScaleAnnotater{
+		mapper:            mapper,
+		scales:            scales,
+		dcs:               dcs,
+		rcs:               rcs,
+		ChangeAnnotations: changeAnnots,
+	}
+}
+
+type ScaleAnnotater struct {
+	mapper            meta.RESTMapper
+	scales            scale.ScalesGetter
+	dcs               appsclient.DeploymentConfigsGetter
+	rcs               corev1client.ReplicationControllersGetter
+	ChangeAnnotations AnnotationFunc
+}
+
+// ScaleUpdater implements a method "Update" that knows how to update a given object
+type ScaleUpdater interface {
+	Update(*ScaleAnnotater, runtime.Object, *autoscalingv1.Scale) error
+}
+
+// scaleUpdater implements ScaleUpdater
+type scaleUpdater struct {
+	encoder   runtime.Encoder
+	namespace string
+	dcGetter  appsclient.DeploymentConfigsGetter
+	rcGetter  corev1client.ReplicationControllersGetter
+}
+
+func NewScaleUpdater(encoder runtime.Encoder, namespace string, dcGetter appsclient.DeploymentConfigsGetter, rcGetter corev1client.ReplicationControllersGetter) ScaleUpdater {
+	return scaleUpdater{
+		encoder:   encoder,
+		namespace: namespace,
+		dcGetter:  dcGetter,
+		rcGetter:  rcGetter,
+	}
+}
+
+func (s scaleUpdater) Update(annotator *ScaleAnnotater, obj runtime.Object, scale *autoscalingv1.Scale) error {
+	var (
+		err                             error
+		patchBytes, originalObj, newObj []byte
+	)
+
+	originalObj, err = runtime.Encode(s.encoder, obj)
+	if err != nil {
+		return err
+	}
+
+	switch typedObj := obj.(type) {
+	case *appsv1.DeploymentConfig:
+		if typedObj.Annotations == nil {
+			typedObj.Annotations = make(map[string]string)
+		}
+
+		annotator.ChangeAnnotations(typedObj.Spec.Replicas, typedObj.Annotations)
+		typedObj.Spec.Replicas = scale.Spec.Replicas
+
+		newObj, err = runtime.Encode(s.encoder, typedObj)
+		if err != nil {
+			return err
+		}
+
+		patchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObj, newObj, &appsv1.DeploymentConfig{})
+		if err != nil {
+			return err
+		}
+
+		_, err = s.dcGetter.DeploymentConfigs(s.namespace).Patch(typedObj.Name, types.StrategicMergePatchType, patchBytes)
+	case *corev1.ReplicationController:
+		if typedObj.Annotations == nil {
+			typedObj.Annotations = make(map[string]string)
+		}
+
+		annotator.ChangeAnnotations(*typedObj.Spec.Replicas, typedObj.Annotations)
+		typedObj.Spec.Replicas = &scale.Spec.Replicas
+
+		newObj, err = runtime.Encode(s.encoder, typedObj)
+		if err != nil {
+			return err
+		}
+
+		patchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObj, newObj, &corev1.ReplicationController{})
+		if err != nil {
+			return err
+		}
+
+		_, err = s.rcGetter.ReplicationControllers(s.namespace).Patch(typedObj.Name, types.StrategicMergePatchType, patchBytes)
+	}
+	return err
+}
+
+// GetObjectWithScale either fetches a known type of object and constructs a Scale from that, or uses the scale
+// subresource to fetch a Scale by itself.
+func (c *ScaleAnnotater) GetObjectWithScale(namespace string, ref unidlingapi.CrossGroupObjectReference) (runtime.Object, *autoscalingv1.Scale, error) {
+	var obj runtime.Object
+	var err error
+	var scale *autoscalingv1.Scale
+
+	switch {
+	case ref.Kind == "DeploymentConfig" && (ref.Group == appsv1.GroupName || ref.Group == legacyGroupName):
+		var dc *appsv1.DeploymentConfig
+		dc, err = c.dcs.DeploymentConfigs(namespace).Get(ref.Name, metav1.GetOptions{})
+		if err != nil {
+			return nil, nil, err
+		}
+		obj = dc
+	case ref.Kind == "ReplicationController" && ref.Group == corev1.GroupName:
+		var rc *corev1.ReplicationController
+		rc, err = c.rcs.ReplicationControllers(namespace).Get(ref.Name, metav1.GetOptions{})
+		if err != nil {
+			return nil, nil, err
+		}
+		obj = rc
+	}
+
+	mappings, err := c.mapper.RESTMappings(schema.GroupKind{Group: ref.Group, Kind: ref.Kind})
+	if err != nil {
+		return nil, nil, err
+	}
+	for _, mapping := range mappings {
+		scale, err = c.scales.Scales(namespace).Get(mapping.Resource.GroupResource(), ref.Name)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return obj, scale, err
+}
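// Illustrative sketch (editorial, not part of the vendored file): the typical
// get-then-update flow pairing GetObjectWithScale above with UpdateObjectScale
// below; annotater, updater, and ref are assumed to exist in the caller.
//
//	obj, scale, err := annotater.GetObjectWithScale("myproject", ref)
//	if err != nil {
//		return err
//	}
//	scale.Spec.Replicas = 1 // wake the idled workload back up
//	return annotater.UpdateObjectScale(updater, "myproject", ref, obj, scale)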
+// UpdateObjectScale updates the scale of an object and removes unidling annotations for objects of a known type.
+// For objects of an unknown type, it scales the object using the scale subresource
+// (and does not change annotations).
+func (c *ScaleAnnotater) UpdateObjectScale(updater ScaleUpdater, namespace string, ref unidlingapi.CrossGroupObjectReference, obj runtime.Object, scale *autoscalingv1.Scale) error {
+	var err error
+
+	mappings, err := c.mapper.RESTMappings(schema.GroupKind{Group: ref.Group, Kind: ref.Kind})
+	if err != nil {
+		return err
+	}
+	if len(mappings) == 0 {
+		return fmt.Errorf("cannot locate resource for %s.%s/%s", ref.Kind, ref.Group, ref.Name)
+	}
+
+	for _, mapping := range mappings {
+		if obj == nil {
+			_, err = c.scales.Scales(namespace).Update(mapping.Resource.GroupResource(), scale)
+			return err
+		}
+
+		switch obj.(type) {
+		case *appsv1.DeploymentConfig, *corev1.ReplicationController:
+			return updater.Update(c, obj, scale)
+		default:
+			klog.V(2).Infof("Unidling unknown type %T: using scale interface and not removing annotations", obj)
+			_, err = c.scales.Scales(namespace).Update(mapping.Resource.GroupResource(), scale)
+		}
+	}
+
+	return err
+}
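// Illustrative sketch (editorial, not part of the vendored file): wiring a
// ScaleAnnotater with an AnnotationFunc that records the previous replica
// count before idling, roughly what an unidling controller does. The
// annotation key and the client/mapper variables are assumptions.
//
//	annotater := unidlingclient.NewScaleAnnotater(scalesGetter, restMapper, dcClient, rcClient,
//		func(currentReplicas int32, annotations map[string]string) {
//			annotations["idling.example.io/previous-replicas"] = fmt.Sprintf("%d", currentReplicas)
//		})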
diff --git a/vendor/github.com/openshift/library-go/test/e2e-encryption/encryption_test.go b/vendor/github.com/openshift/library-go/test/e2e-encryption/encryption_test.go
new file mode 100644
index 00000000000..ffa708bcbb7
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/e2e-encryption/encryption_test.go
@@ -0,0 +1,603 @@
+package e2e
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+	"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/wait"
+	apiserverv1 "k8s.io/apiserver/pkg/apis/config/v1"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/retry"
+	"sigs.k8s.io/yaml"
+
+	configv1 "github.com/openshift/api/config/v1"
+	operatorv1 "github.com/openshift/api/operator/v1"
+	configv1clientfake "github.com/openshift/client-go/config/clientset/versioned/fake"
+	configv1informers "github.com/openshift/client-go/config/informers/externalversions"
+
+	"github.com/openshift/library-go/pkg/operator/encryption"
+	"github.com/openshift/library-go/pkg/operator/encryption/controllers/migrators"
+	"github.com/openshift/library-go/pkg/operator/encryption/secrets"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/genericoperatorclient"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+	"github.com/openshift/library-go/test/library"
+)
+
+func TestEncryptionIntegration(tt *testing.T) {
+	// in a terminal, print logs immediately
+	var t T = tt
+	fi, _ := os.Stdin.Stat()
+	if (fi.Mode() & os.ModeCharDevice) != 0 {
+		t = fmtLogger{tt}
+	}
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+
+	component := strings.ToLower(library.GenerateNameForTest(tt, ""))
+
+	kubeConfig, err := library.NewClientConfigForTest()
+	require.NoError(t, err)
+
+	// kube clients
+	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
+	require.NoError(t, err)
+	kubeInformers := v1helpers.NewKubeInformersForNamespaces(kubeClient, "openshift-config-managed")
+	apiextensionsClient, err := v1beta1.NewForConfig(kubeConfig)
+	require.NoError(t, err)
+
+	// create EncryptionTest operator CRD
+	var operatorCRD apiextensionsv1beta1.CustomResourceDefinition
+	require.NoError(t, yaml.Unmarshal([]byte(encryptionTestOperatorCRD), &operatorCRD))
+	crd, err := apiextensionsClient.CustomResourceDefinitions().Create(&operatorCRD)
+	if errors.IsAlreadyExists(err) {
+		t.Logf("CRD %s already exists, ignoring error", operatorCRD.Name)
+	} else {
+		require.NoError(t, err)
+	}
+	defer apiextensionsClient.CustomResourceDefinitions().Delete(crd.Name, &metav1.DeleteOptions{})
+
+	// create operator client and create instance with ManagementState="Managed"
+	operatorGVR := schema.GroupVersionResource{Group: operatorCRD.Spec.Group, Version: "v1", Resource: operatorCRD.Spec.Names.Plural}
+	operatorClient, operatorInformer, err := genericoperatorclient.NewClusterScopedOperatorClient(kubeConfig, operatorGVR)
+	require.NoError(t, err)
+	dynamicClient, err := dynamic.NewForConfig(kubeConfig)
+	require.NoError(t, err)
+	err = wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
+		_, err := dynamicClient.Resource(operatorGVR).Create(&unstructured.Unstructured{
+			Object: map[string]interface{}{
+				"apiVersion": "operator.openshift.io/v1",
+				"kind":       "EncryptionTest",
+				"metadata": map[string]interface{}{
+					"name": "cluster",
+				},
+				"spec": map[string]interface{}{
+					"managementState": "Managed",
+				},
+			},
+		}, metav1.CreateOptions{})
+		if err != nil && !errors.IsAlreadyExists(err) {
+			t.Logf("failed to create EncryptionTest object: %v", err)
+			return false, nil
+		}
+		return true, nil
+	})
+	require.NoError(t, err)
+
+	// create APIServer clients
+	fakeConfigClient := configv1clientfake.NewSimpleClientset(&configv1.APIServer{ObjectMeta: metav1.ObjectMeta{Name: "cluster"}})
+	fakeConfigInformer := configv1informers.NewSharedInformerFactory(fakeConfigClient, 10*time.Minute)
+	fakeApiServerClient := fakeConfigClient.ConfigV1().APIServers()
+
+	// create controllers
+	eventRecorder := events.NewLoggingEventRecorder(component)
+	deployer := NewInstantDeployer(t, stopCh, kubeInformers, kubeClient.CoreV1(), fmt.Sprintf("encryption-config-%s", component))
+	migrator := migrators.NewInProcessMigrator(dynamicClient, kubeClient.DiscoveryClient)
+
+	controllers, err := encryption.NewControllers(
+		component,
+		deployer,
+		migrator,
+		operatorClient,
+		fakeApiServerClient,
+		fakeConfigInformer.Config().V1().APIServers(),
+		kubeInformers,
+		deployer, // secret client wrapping kubeClient with encryption-config revision counting
+		eventRecorder,
+		// some random low-cardinality GVRs:
+		schema.GroupResource{Group: "operator.openshift.io", Resource: "kubeapiservers"},
+		schema.GroupResource{Group: "operator.openshift.io", Resource: "kubeschedulers"},
+	)
+	require.NoError(t, err)
+
+	// launch controllers
+	fakeConfigInformer.Start(stopCh)
+	kubeInformers.Start(stopCh)
+	operatorInformer.Start(stopCh)
+	go controllers.Run(stopCh)
+
+	waitForConfigEventuallyCond := func(cond func(s string) bool) {
+		t.Helper()
+		stopCh := time.After(wait.ForeverTestTimeout)
+		for {
+			c, err := deployer.WaitUntil(stopCh)
+			require.NoError(t, err)
+			err = deployer.Deploy()
+			require.NoError(t, err)
+
+			got := toString(c)
+			t.Logf("Observed %s", got)
+			if cond(got) {
+				return
+			}
+		}
+	}
+	waitForConfigEventually := func(expected string) {
+		t.Helper()
+		waitForConfigEventuallyCond(func(got string) bool {
+			return expected == got
+		})
+	}
+	waitForConfigs := func(ss ...string) {
+		t.Helper()
+
for _, expected := range ss { + c, err := deployer.Wait() + require.NoError(t, err) + got := toString(c) + t.Logf("Observed %s", got) + if expected != "*" && got != expected { + t.Fatalf("wrong EncryptionConfig:\n expected: %s\n got: %s", expected, got) + } + + err = deployer.Deploy() + require.NoError(t, err) + } + } + conditionStatus := func(condType string) operatorv1.ConditionStatus { + _, status, _, err := operatorClient.GetOperatorState() + require.NoError(t, err) + + for _, c := range status.Conditions { + if c.Type != condType { + continue + } + return c.Status + } + return operatorv1.ConditionUnknown + } + requireConditionStatus := func(condType string, expected operatorv1.ConditionStatus) { + t.Helper() + if status := conditionStatus(condType); status != expected { + t.Errorf("expected condition %s of status %s, found: %q", condType, expected, status) + } + } + waitForConditionStatus := func(condType string, expected operatorv1.ConditionStatus) { + t.Helper() + err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { + return conditionStatus(condType) == expected, nil + }) + require.NoError(t, err) + } + waitForMigration := func(key string) { + t.Helper() + err := wait.PollImmediate(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) { + s, err := kubeClient.CoreV1().Secrets("openshift-config-managed").Get(fmt.Sprintf("encryption-key-%s-%s", component, key), metav1.GetOptions{}) + require.NoError(t, err) + + ks, err := secrets.ToKeyState(s) + require.NoError(t, err) + return len(ks.Migrated.Resources) == 2, nil + }) + require.NoError(t, err) + } + + t.Logf("Wait for initial Encrypted condition") + waitForConditionStatus("Encrypted", operatorv1.ConditionFalse) + + t.Logf("Enable encryption, mode aescbc") + _, err = fakeApiServerClient.Patch("cluster", types.MergePatchType, []byte(`{"spec":{"encryption":{"type":"aescbc"}}}`)) + require.NoError(t, err) + + t.Logf("Waiting for key to show up") + keySecretsLabel := fmt.Sprintf("%s=%s", secrets.EncryptionKeySecretsLabel, component) + waitForKeys := func(n int) { + t.Helper() + err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { + l, err := kubeClient.CoreV1().Secrets("openshift-config-managed").List(metav1.ListOptions{LabelSelector: keySecretsLabel}) + if err != nil { + return false, err + } + if len(l.Items) == n { + return true, nil + } + t.Logf("Seeing %d secrets, waiting for %d", len(l.Items), n) + return false, nil + }) + require.NoError(t, err) + } + waitForKeys(1) + waitForConfigs( + "kubeapiservers.operator.openshift.io=identity,aescbc:1;kubeschedulers.operator.openshift.io=identity,aescbc:1", + "kubeapiservers.operator.openshift.io=aescbc:1,identity;kubeschedulers.operator.openshift.io=aescbc:1,identity", + ) + waitForMigration("1") + requireConditionStatus("Encrypted", operatorv1.ConditionTrue) + + t.Logf("Switch to identity") + _, err = fakeApiServerClient.Patch("cluster", types.MergePatchType, []byte(`{"spec":{"encryption":{"type":"identity"}}}`)) + require.NoError(t, err) + waitForKeys(2) + waitForConfigs( + "kubeapiservers.operator.openshift.io=aescbc:1,identity,aesgcm:2;kubeschedulers.operator.openshift.io=aescbc:1,identity,aesgcm:2", + "kubeapiservers.operator.openshift.io=identity,aescbc:1,aesgcm:2;kubeschedulers.operator.openshift.io=identity,aescbc:1,aesgcm:2", + ) + requireConditionStatus("Encrypted", operatorv1.ConditionFalse) + + t.Logf("Switch to empty mode") + _, err = fakeApiServerClient.Patch("cluster", 
types.MergePatchType, []byte(`{"spec":{"encryption":{"type":""}}}`)) + require.NoError(t, err) + time.Sleep(5 * time.Second) // give controller time to create keys (it shouldn't) + waitForKeys(2) + requireConditionStatus("Encrypted", operatorv1.ConditionFalse) + + t.Logf("Switch to aescbc again") + _, err = fakeApiServerClient.Patch("cluster", types.MergePatchType, []byte(`{"spec":{"encryption":{"type":"aescbc"}}}`)) + require.NoError(t, err) + waitForKeys(3) + waitForConfigs( + "kubeapiservers.operator.openshift.io=identity,aescbc:3,aescbc:1,aesgcm:2;kubeschedulers.operator.openshift.io=identity,aescbc:3,aescbc:1,aesgcm:2", + "kubeapiservers.operator.openshift.io=aescbc:3,aescbc:1,identity,aesgcm:2;kubeschedulers.operator.openshift.io=aescbc:3,aescbc:1,identity,aesgcm:2", + "kubeapiservers.operator.openshift.io=aescbc:3,identity,aesgcm:2;kubeschedulers.operator.openshift.io=aescbc:3,identity,aesgcm:2", + ) + waitForConditionStatus("Encrypted", operatorv1.ConditionTrue) + + t.Logf("Setting external reason") + setExternalReason := func(reason string) { + t.Helper() + err = retry.RetryOnConflict(retry.DefaultBackoff, func() error { + spec, _, rv, err := operatorClient.GetOperatorState() + if err != nil { + return err + } + spec.UnsupportedConfigOverrides.Raw = []byte(fmt.Sprintf(`{"encryption":{"reason":%q}}`, reason)) + _, _, err = operatorClient.UpdateOperatorSpec(rv, spec) + return err + }) + require.NoError(t, err) + } + setExternalReason("a") + waitForKeys(4) + waitForConfigs( + "kubeapiservers.operator.openshift.io=aescbc:3,aescbc:4,identity,aesgcm:2;kubeschedulers.operator.openshift.io=aescbc:3,aescbc:4,identity,aesgcm:2", + "kubeapiservers.operator.openshift.io=aescbc:4,aescbc:3,identity,aesgcm:2;kubeschedulers.operator.openshift.io=aescbc:4,aescbc:3,identity,aesgcm:2", + "kubeapiservers.operator.openshift.io=aescbc:4,aescbc:3,identity;kubeschedulers.operator.openshift.io=aescbc:4,aescbc:3,identity", + ) + + t.Logf("Setting another external reason") + setExternalReason("b") + waitForKeys(5) + waitForConfigs( + "kubeapiservers.operator.openshift.io=aescbc:4,aescbc:5,aescbc:3,identity;kubeschedulers.operator.openshift.io=aescbc:4,aescbc:5,aescbc:3,identity", + "kubeapiservers.operator.openshift.io=aescbc:5,aescbc:4,aescbc:3,identity;kubeschedulers.operator.openshift.io=aescbc:5,aescbc:4,aescbc:3,identity", + "kubeapiservers.operator.openshift.io=aescbc:5,aescbc:4,identity;kubeschedulers.operator.openshift.io=aescbc:5,aescbc:4,identity", + ) + + t.Logf("Expire the last key") + _, err = kubeClient.CoreV1().Secrets("openshift-config-managed").Patch(fmt.Sprintf("encryption-key-%s-5", component), types.MergePatchType, []byte(`{"metadata":{"annotations":{"encryption.apiserver.operator.openshift.io/migrated-timestamp":"2010-10-17T14:14:52+02:00"}}}`)) + require.NoError(t, err) + waitForKeys(6) + waitForConfigs( + "kubeapiservers.operator.openshift.io=aescbc:5,aescbc:6,aescbc:4,identity;kubeschedulers.operator.openshift.io=aescbc:5,aescbc:6,aescbc:4,identity", + "kubeapiservers.operator.openshift.io=aescbc:6,aescbc:5,aescbc:4,identity;kubeschedulers.operator.openshift.io=aescbc:6,aescbc:5,aescbc:4,identity", + "kubeapiservers.operator.openshift.io=aescbc:6,aescbc:5,identity;kubeschedulers.operator.openshift.io=aescbc:6,aescbc:5,identity", + ) + waitForConditionStatus("Encrypted", operatorv1.ConditionTrue) + + t.Logf("Delete the last key") + _, err = kubeClient.CoreV1().Secrets("openshift-config-managed").Patch(fmt.Sprintf("encryption-key-%s-6", component), types.JSONPatchType, 
[]byte(`[{"op":"remove","path":"/metadata/finalizers"}]`)) + require.NoError(t, err) + err = kubeClient.CoreV1().Secrets("openshift-config-managed").Delete(fmt.Sprintf("encryption-key-%s-6", component), nil) + require.NoError(t, err) + err = wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { + _, err := kubeClient.CoreV1().Secrets("openshift-config-managed").Get(fmt.Sprintf("encryption-key-%s-7", component), metav1.GetOptions{}) + if errors.IsNotFound(err) { + return false, nil + } + return err == nil, nil + }) + require.NoError(t, err) + // here we see potentially also the following if the key controller is slower than the state controller: + // kubeapiservers.operator.openshift.io=aescbc:6,aescbc:5,identity;kubeschedulers.operator.openshift.io=aescbc:6,aescbc:5,identity + // but eventually we get the following: + waitForConfigEventually( + // 6 as preserved, unbacked config key, 7 as newly created key, and 5 as fully migrated key + "kubeapiservers.operator.openshift.io=aescbc:6,aescbc:7,aescbc:5,aescbc:4,identity;kubeschedulers.operator.openshift.io=aescbc:6,aescbc:7,aescbc:5,aescbc:4,identity", + ) + waitForConfigs( + // 7 is promoted + "kubeapiservers.operator.openshift.io=aescbc:7,aescbc:6,aescbc:5,aescbc:4,identity;kubeschedulers.operator.openshift.io=aescbc:7,aescbc:6,aescbc:5,aescbc:4,identity", + // 7 is migrated, plus one more backed key, which is 5 (6 is deleted) + "kubeapiservers.operator.openshift.io=aescbc:7,aescbc:6,aescbc:5,identity;kubeschedulers.operator.openshift.io=aescbc:7,aescbc:6,aescbc:5,identity", + ) + waitForConditionStatus("Encrypted", operatorv1.ConditionTrue) + + t.Logf("Delete the openshift-config-managed config") + _, err = kubeClient.CoreV1().Secrets("openshift-config-managed").Patch(fmt.Sprintf("encryption-config-%s", component), types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`)) + require.NoError(t, err) + err = kubeClient.CoreV1().Secrets("openshift-config-managed").Delete(fmt.Sprintf("encryption-config-%s", component), nil) + require.NoError(t, err) + waitForConfigs( + // one migrated read-key (7) and one more backed key (5), and everything in between (6) + "kubeapiservers.operator.openshift.io=aescbc:7,aescbc:6,aescbc:5,identity;kubeschedulers.operator.openshift.io=aescbc:7,aescbc:6,aescbc:5,identity", + ) + waitForConditionStatus("Encrypted", operatorv1.ConditionTrue) + + t.Logf("Delete the openshift-config-managed config") + deployer.DeleteOperandConfig() + waitForConfigs( + // 7 is migrated and hence only one needed, but we rotate through identity + "kubeapiservers.operator.openshift.io=identity,aescbc:7;kubeschedulers.operator.openshift.io=identity,aescbc:7", + // 7 is migrated, plus one backed key (5). 
6 is deleted, and therefore is not preserved (would be if the operand config was not deleted) + "kubeapiservers.operator.openshift.io=aescbc:7,aescbc:5,identity;kubeschedulers.operator.openshift.io=aescbc:7,aescbc:5,identity", + ) + waitForConditionStatus("Encrypted", operatorv1.ConditionTrue) +} + +const encryptionTestOperatorCRD = ` +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: encryptiontests.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: EncryptionTest + listKind: EncryptionTestList + plural: encryptiontests + singular: encryptiontest + scope: Cluster + subresources: + status: {} + versions: + - name: v1 + served: true + storage: true +` + +func toString(c *apiserverv1.EncryptionConfiguration) string { + rs := make([]string, 0, len(c.Resources)) + for _, r := range c.Resources { + ps := make([]string, 0, len(r.Providers)) + for _, p := range r.Providers { + var s string + switch { + case p.AESCBC != nil: + s = "aescbc:" + p.AESCBC.Keys[0].Name + case p.AESGCM != nil: + s = "aesgcm:" + p.AESGCM.Keys[0].Name + case p.Identity != nil: + s = "identity" + } + ps = append(ps, s) + } + rs = append(rs, fmt.Sprintf("%s=%s", strings.Join(r.Resources, ","), strings.Join(ps, ","))) + } + return strings.Join(rs, ";") +} + +func NewInstantDeployer(t T, stopCh <-chan struct{}, kubeInformers v1helpers.KubeInformersForNamespaces, secretsClient corev1client.SecretsGetter, + secretName string) *lockStepDeployer { + return &lockStepDeployer{ + kubeInformers: kubeInformers, + secretsClient: secretsClient, + stopCh: stopCh, + configManagedSecretsClient: secretInterceptor{ + t: t, + output: make(chan *corev1.Secret), + SecretInterface: secretsClient.Secrets("openshift-config-managed"), + secretName: secretName, + }, + } +} + +// lockStepDeployer mirrors the encryption-config each time Deploy() is called. +// After Deploy() a call to Wait() is necessary. +type lockStepDeployer struct { + stopCh <-chan struct{} + + kubeInformers v1helpers.KubeInformersForNamespaces + secretsClient corev1client.SecretsGetter + configManagedSecretsClient secretInterceptor + + lock sync.Mutex + next *corev1.Secret + current *corev1.Secret + handlers []cache.ResourceEventHandler +} + +func (d *lockStepDeployer) Wait() (*apiserverv1.EncryptionConfiguration, error) { + return d.WaitUntil(nil) +} + +func (d *lockStepDeployer) WaitUntil(stopCh <-chan time.Time) (*apiserverv1.EncryptionConfiguration, error) { + d.lock.Lock() + if d.next != nil { + d.lock.Unlock() + return nil, fmt.Errorf("next secret already set. 
Forgotten Deploy call?") + } + d.lock.Unlock() + + select { + case s := <-d.configManagedSecretsClient.output: + var c apiserverv1.EncryptionConfiguration + if err := json.Unmarshal(s.Data["encryption-config"], &c); err != nil { + return nil, fmt.Errorf("failed to unmarshal encryption secret: %v", err) + } + + d.lock.Lock() + defer d.lock.Unlock() + d.next = s + + return &c, nil + case <-stopCh: + return nil, fmt.Errorf("timeout") + case <-d.stopCh: + return nil, fmt.Errorf("terminating") + } +} + +func (d *lockStepDeployer) Deploy() error { + d.lock.Lock() + + if d.next == nil { + d.lock.Unlock() + return fmt.Errorf("no next secret available") + } + + old := d.current + d.current = d.next + d.next = nil + + handlers := make([]cache.ResourceEventHandler, len(d.handlers)) + copy(handlers, d.handlers) + + d.lock.Unlock() + + for _, h := range handlers { + if old == nil { + h.OnAdd(d.current) + } else { + h.OnUpdate(old, d.current) + } + } + + return nil +} + +func (d *lockStepDeployer) Secrets(namespace string) corev1client.SecretInterface { + if namespace == "openshift-config-managed" { + return &d.configManagedSecretsClient + } + return d.secretsClient.Secrets(namespace) +} + +type secretInterceptor struct { + corev1client.SecretInterface + + t T + output chan *corev1.Secret + secretName string +} + +func (c *secretInterceptor) Create(s *corev1.Secret) (*corev1.Secret, error) { + s, err := c.SecretInterface.Create(s) + if err != nil { + return s, err + } + + c.t.Logf("Create %s", s.Name) + if s.Name == c.secretName { + c.output <- s + } + + return s, nil +} + +func (c *secretInterceptor) Update(s *corev1.Secret) (*corev1.Secret, error) { + s, err := c.SecretInterface.Update(s) + if err != nil { + return s, err + } + + c.t.Logf("Update %s", s.Name) + if s.Name == c.secretName { + c.output <- s + } + + return s, nil +} + +func (c *secretInterceptor) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *corev1.Secret, err error) { + s, err := c.SecretInterface.Patch(name, pt, data, subresources...) + if err != nil { + return s, err + } + + c.t.Logf("Patch %s", s.Name) + if s.Name == c.secretName { + c.output <- s + } + + return s, nil +} + +func (d *lockStepDeployer) AddEventHandler(handler cache.ResourceEventHandler) []cache.InformerSynced { + d.lock.Lock() + defer d.lock.Unlock() + + d.handlers = append(d.handlers, handler) + + return []cache.InformerSynced{} +} + +func (d *lockStepDeployer) DeployedEncryptionConfigSecret() (secret *corev1.Secret, converged bool, err error) { + d.lock.Lock() + defer d.lock.Unlock() + + return d.current, true, nil +} + +func (d *lockStepDeployer) DeleteOperandConfig() { + d.lock.Lock() + old := d.current + d.current = nil + d.next = nil + handlers := make([]cache.ResourceEventHandler, len(d.handlers)) + copy(handlers, d.handlers) + d.lock.Unlock() + + for _, h := range handlers { + h.OnDelete(old) + } +} + +type T interface { + require.TestingT + Logf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Helper() +} + +type fmtLogger struct { + *testing.T +} + +func (l fmtLogger) Errorf(format string, args ...interface{}) { + l.T.Helper() + fmt.Printf(format+"\n", args...) + l.T.Errorf(format, args...) +} + +func (l fmtLogger) Logf(format string, args ...interface{}) { + l.T.Helper() + fmt.Printf("STEP: "+format+"\n", args...) + l.T.Logf(format, args...) 
+}
diff --git a/vendor/github.com/openshift/library-go/test/library/client.go b/vendor/github.com/openshift/library-go/test/library/client.go
new file mode 100644
index 00000000000..06f6192b21e
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/library/client.go
@@ -0,0 +1,19 @@
+package library
+
+import (
+	"fmt"
+
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/client-go/tools/clientcmd/api"
+)
+
+// NewClientConfigForTest returns a config configured to connect to the api server
+func NewClientConfigForTest() (*rest.Config, error) {
+	loader := clientcmd.NewDefaultClientConfigLoadingRules()
+	clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loader, &clientcmd.ConfigOverrides{ClusterInfo: api.Cluster{InsecureSkipTLSVerify: true}})
+	config, err := clientConfig.ClientConfig()
+	if err == nil {
+		fmt.Printf("Found configuration for host %v.\n", config.Host)
+	}
+	return config, err
+}
diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/assertion.go b/vendor/github.com/openshift/library-go/test/library/encryption/assertion.go
new file mode 100644
index 00000000000..7f6dd529801
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/library/encryption/assertion.go
@@ -0,0 +1,175 @@
+package encryption
+
+import (
+	"bytes"
+	"context"
+	"encoding/hex"
+	"fmt"
+	"reflect"
+	"testing"
+	"time"
+
+	"github.com/coreos/etcd/clientv3"
+	"github.com/stretchr/testify/require"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+var protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00}
+
+var (
+	apiserverScheme = runtime.NewScheme()
+	apiserverCodecs = serializer.NewCodecFactory(apiserverScheme)
+)
+
+const (
+	jsonEncodingPrefix           = "{"
+	protoEncryptedDataPrefix     = "k8s:enc:"
+	aesCBCTransformerPrefixV1    = "k8s:enc:aescbc:v1:"
+	secretboxTransformerPrefixV1 = "k8s:enc:secretbox:v1:"
+)
+
+func init() {
+	utilruntime.Must(apiserverconfigv1.AddToScheme(apiserverScheme))
+}
+
+// AssertEncryptionConfig checks that the encryption config holds only targetGRs; this ensures that only those resources were encrypted.
+// We don't check the keys because e2e tests are run randomly and we would have to consider all encryption secrets to get the right order of the keys.
+// We test the content of the encryption config in more detail in unit and integration tests.
+func AssertEncryptionConfig(t testing.TB, clientSet ClientSet, encryptionConfigSecretName string, namespace string, targetGRs []schema.GroupResource) {
+	t.Helper()
+	t.Logf("Checking if %q in %q has desired GRs %v", encryptionConfigSecretName, namespace, targetGRs)
+	encryptionConfigSecret, err := clientSet.Kube.CoreV1().Secrets(namespace).Get(encryptionConfigSecretName, metav1.GetOptions{})
+	require.NoError(t, err)
+	encodedEncryptionConfig, foundEncryptionConfig := encryptionConfigSecret.Data["encryption-config"]
+	if !foundEncryptionConfig {
+		t.Errorf("Haven't found encryption config at %q key in the encryption secret %q", "encryption-config", encryptionConfigSecretName)
+	}
+
+	decoder := apiserverCodecs.UniversalDecoder(apiserverconfigv1.SchemeGroupVersion)
+	encryptionConfigObj, err := runtime.Decode(decoder, encodedEncryptionConfig)
+	require.NoError(t, err)
+	encryptionConfig, ok := encryptionConfigObj.(*apiserverconfigv1.EncryptionConfiguration)
+	if !ok {
+		t.Errorf("Unable to decode encryption config, unexpected type %T", encryptionConfigObj)
+	}
+
+	for _, rawActualResource := range encryptionConfig.Resources {
+		if len(rawActualResource.Resources) != 1 {
+			t.Errorf("Invalid encryption config for resource %s, expected exactly one resource, got %d", rawActualResource.Resources, len(rawActualResource.Resources))
+		}
+		actualResource := schema.ParseGroupResource(rawActualResource.Resources[0])
+		actualResourceFound := false
+		for _, expectedResource := range targetGRs {
+			if reflect.DeepEqual(expectedResource, actualResource) {
+				actualResourceFound = true
+				break
+			}
+		}
+		if !actualResourceFound {
+			t.Errorf("Encryption config has an invalid resource %v", actualResource)
+		}
+	}
+}
+
+func AssertLastMigratedKey(t testing.TB, kubeClient kubernetes.Interface, targetGRs []schema.GroupResource, namespace, labelSelector string) {
+	t.Helper()
+	expectedGRs := targetGRs
+	t.Logf("Checking if the last migrated key was used to encrypt %v", expectedGRs)
+	lastMigratedKeyMeta, err := GetLastKeyMeta(kubeClient, namespace, labelSelector)
+	require.NoError(t, err)
+	if len(lastMigratedKeyMeta.Name) == 0 {
+		t.Log("Nothing to check, no new key was created")
+		return
+	}
+
+	if len(expectedGRs) != len(lastMigratedKeyMeta.Migrated) {
+		t.Errorf("Wrong number of migrated resources for %q key, expected %d, got %d", lastMigratedKeyMeta.Name, len(expectedGRs), len(lastMigratedKeyMeta.Migrated))
+	}
+
+	for _, expectedGR := range expectedGRs {
+		if !hasResource(expectedGR, lastMigratedKeyMeta.Migrated) {
+			t.Errorf("%q wasn't used to encrypt %v, only %v", lastMigratedKeyMeta.Name, expectedGR, lastMigratedKeyMeta.Migrated)
+		}
+	}
+}
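// Illustrative sketch (editorial, not part of the vendored file): a test
// asserting that every Secret in etcd is AES-CBC encrypted via VerifyResources
// below. The etcd key prefix is the conventional one for Secrets; t and
// clientSet are assumed to exist in the test.
//
//	n, err := VerifyResources(t, clientSet.Etcd, "/kubernetes.io/secrets/", "aescbc", false)
//	require.NoError(t, err)
//	t.Logf("verified that %d values under the prefix are encrypted with aescbc", n)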
+func VerifyResources(t testing.TB, etcdClient EtcdClient, etcdKeyPrefix string, expectedMode string, allowEmpty bool) (int, error) {
+	timeout, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+	defer cancel()
+
+	resp, err := etcdClient.Get(timeout, etcdKeyPrefix, clientv3.WithPrefix())
+	switch {
+	case err != nil:
+		return 0, fmt.Errorf("failed to list prefix %s: %v", etcdKeyPrefix, err)
+	case (resp.Count == 0 || len(resp.Kvs) == 0) && !allowEmpty:
+		return 0, fmt.Errorf("empty list response for prefix %s: %+v", etcdKeyPrefix, resp)
+	case resp.More:
+		return 0, fmt.Errorf("incomplete list response for prefix %s: %+v", etcdKeyPrefix, resp)
+	}
+
+	for _, keyValue := range resp.Kvs {
+		if err := verifyPrefixForRawData(expectedMode, keyValue.Value); err != nil {
+			return 0, fmt.Errorf("key %s failed check: %v\n%s", keyValue.Key, err, hex.Dump(keyValue.Value))
+		}
+	}
+
+	return len(resp.Kvs), nil
+}
+
+func verifyPrefixForRawData(expectedMode string, data []byte) error {
+	if len(data) == 0 {
+		return fmt.Errorf("empty data")
+	}
+
+	conditionToStr := func(condition bool) string {
+		if condition {
+			return "encrypted"
+		}
+		return "unencrypted"
+	}
+
+	expectedEncrypted := true
+	if expectedMode == "identity" {
+		expectedMode = "identity-proto"
+		expectedEncrypted = false
+	}
+
+	actualMode, isEncrypted := encryptionModeFromEtcdValue(data)
+	if expectedEncrypted != isEncrypted {
+		return fmt.Errorf("unexpected encrypted state, expected data to be %q but was %q with mode %q", conditionToStr(expectedEncrypted), conditionToStr(isEncrypted), actualMode)
+	}
+	if actualMode != expectedMode {
+		return fmt.Errorf("unexpected encryption mode %q, expected %q, was data encrypted/decrypted with a wrong key", actualMode, expectedMode)
+	}
+
+	return nil
+}
+
+func encryptionModeFromEtcdValue(data []byte) (string, bool) {
+	isEncrypted := bytes.HasPrefix(data, []byte(protoEncryptedDataPrefix)) // all encrypted data has this prefix
+	return func() string {
+		switch {
+		case hasPrefixAndTrailingData(data, []byte(aesCBCTransformerPrefixV1)): // AES-CBC has this prefix
+			return "aescbc"
+		case hasPrefixAndTrailingData(data, []byte(secretboxTransformerPrefixV1)): // Secretbox has this prefix
+			return "secretbox"
+		case hasPrefixAndTrailingData(data, []byte(jsonEncodingPrefix)): // unencrypted json data has this prefix
+			return "identity-json"
+		case hasPrefixAndTrailingData(data, protoEncodingPrefix): // unencrypted protobuf data has this prefix
+			return "identity-proto"
+		default:
+			return "unknown" // this should never happen
+		}
+	}(), isEncrypted
+}
+
+func hasPrefixAndTrailingData(data, prefix []byte) bool {
+	return bytes.HasPrefix(data, prefix) && len(data) > len(prefix)
+}
diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/e_log.go b/vendor/github.com/openshift/library-go/test/library/encryption/e_log.go
new file mode 100644
index 00000000000..47a66673283
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/library/encryption/e_log.go
@@ -0,0 +1,89 @@
+package encryption
+
+import (
+	"fmt"
+	"os"
+	"testing"
+)
+
+// E is like testing.T except it overloads some methods to print to stdout
+// when the encryption tests are run from a local machine
+type E struct {
+	*testing.T
+	local        bool
+	tearDownFunc func(testing.TB, bool)
+}
+
+func PrintEventsOnFailure(namespace string) func(*E) {
+	return func(e *E) {
+		e.registerTearDownFun(setUpTearDown(namespace))
+	}
+}
+
+func NewE(t *testing.T, options ...func(*E)) *E {
+	e := &E{T: t}
+	// the test logger only prints text if a test fails or the -v flag is set;
+	// that means we don't have any visibility when running the tests from a local machine
+	//
+	// thus the std logger will be used when the tests are run from a local machine to give instant feedback
+	if len(os.Getenv("PROW_JOB_ID")) == 0 {
+		e.local = true
+	}
+
+	for _, option := range options {
+		option(e)
+	}
+
+	return e
+}
+
+func (e *E) Log(args ...interface{}) {
+	if e.local {
+		fmt.Println(args...)
+		return
+	}
+	e.T.Log(args...)
+}
+
+func (e *E) Logf(format string, args ...interface{}) {
+	if e.local {
+		fmt.Printf(fmt.Sprintf("%s\n", format), args...)
+		return
+	}
+	e.T.Logf(format, args...)
+}
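// Illustrative sketch (editorial, not part of the vendored file): constructing
// the E wrapper in a test so that namespace events are dumped on failure; the
// namespace is an assumption.
//
//	func TestEncryptionRotation(t *testing.T) {
//		e := NewE(t, PrintEventsOnFailure("openshift-config-managed"))
//		e.Logf("starting rotation scenario")
//		// test body using e in place of t
//	}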
+
+func (e *E) Errorf(format string, args ...interface{}) {
+	if e.local {
+		e.Logf(fmt.Sprintf("ERROR: %s", format), args...)
+		e.handleTearDown(true)
+		os.Exit(-1)
+	}
+	e.T.Errorf(format, args...)
+	e.handleTearDown(e.Failed())
+}
+
+func (e *E) Error(args ...interface{}) {
+	if e.local {
+		e.Errorf("%v", args...)
+		return
+	}
+	e.Errorf("%v", args...)
+}
+
+func (e *E) Fatalf(format string, args ...interface{}) {
+	panic("Use require.NoError instead of t.Fatal so that TearDown can dump debugging info on failure")
+}
+
+func (e *E) Fatal(args ...interface{}) {
+	panic("Use require.NoError instead of t.Fatal so that TearDown can dump debugging info on failure")
+}
+
+func (e *E) registerTearDownFun(tearDownFunc func(testing.TB, bool)) {
+	e.tearDownFunc = tearDownFunc
+}
+
+func (e *E) handleTearDown(failed bool) {
+	if e.tearDownFunc != nil {
+		e.tearDownFunc(e, failed)
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/errors.go b/vendor/github.com/openshift/library-go/test/library/encryption/errors.go
new file mode 100644
index 00000000000..0847cf37cd9
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/library/encryption/errors.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package encryption
+
+import (
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/util/net"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+// isConnectionRefusedError checks if the error string includes "connection refused"
+// TODO: find a "go-way" to detect this error, probably using *os.SyscallError
+func isConnectionRefusedError(err error) bool {
+	return strings.Contains(err.Error(), "connection refused")
+}
+
+// transientAPIError returns true if the provided error indicates that a retry
+// against an HA server has a good chance to succeed.
+func transientAPIError(err error) bool { + switch { + case err == nil: + return false + case errors.IsServerTimeout(err), errors.IsTooManyRequests(err), net.IsProbableEOF(err), net.IsConnectionReset(err), net.IsNoRoutesError(err), isConnectionRefusedError(err): + return true + default: + return false + } +} + +func orError(a, b func(error) bool) func(error) bool { + return func(err error) bool { + return a(err) || b(err) + } +} + +func onErrorWithTimeout(timeout time.Duration, backoff wait.Backoff, errorFunc func(error) bool, fn func() error) error { + var lastMatchingError error + stopCh := time.After(timeout) + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + select { + case <-stopCh: + return false, wait.ErrWaitTimeout + default: + } + err := fn() + switch { + case err == nil: + return true, nil + case errorFunc(err): + lastMatchingError = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout && lastMatchingError != nil { + err = lastMatchingError + } + return err +} diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/etcd_client.go b/vendor/github.com/openshift/library-go/test/library/encryption/etcd_client.go new file mode 100644 index 00000000000..99a251b0944 --- /dev/null +++ b/vendor/github.com/openshift/library-go/test/library/encryption/etcd_client.go @@ -0,0 +1,113 @@ +package encryption + +import ( + "bufio" + "context" + "fmt" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/clientv3" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" +) + +type EtcdClient interface { + Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) +} + +func NewEtcdClient(kubeClient kubernetes.Interface) EtcdClient { + return &etcdWrapper{kubeClient: kubeClient} +} + +type etcdWrapper struct { + kubeClient kubernetes.Interface +} + +func (e *etcdWrapper) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) { + // we need to rebuild this port-forward based client every time so we can tolerate API server rollouts + clientInternal, done, err := e.newEtcdClientInternal() + if err != nil { + return nil, fmt.Errorf("failed to build port-forward based etcd client: %v", err) + } + defer done() + return clientInternal.Get(ctx, key, opts...) 
+} + +func (e *etcdWrapper) newEtcdClientInternal() (EtcdClient, func(), error) { + ctx, cancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext(ctx, "oc", "port-forward", "service/etcd", ":2379", "-n", "openshift-etcd") + + done := func() { + cancel() + _ = cmd.Wait() // wait to clean up resources but ignore returned error since cancel kills the process + } + + var err error // so we can clean up on error + defer func() { + if err != nil { + done() + } + }() + + stdOut, err := cmd.StdoutPipe() + if err != nil { + return nil, nil, err + } + + if err = cmd.Start(); err != nil { + return nil, nil, err + } + + scanner := bufio.NewScanner(stdOut) + if !scanner.Scan() { + return nil, nil, fmt.Errorf("failed to scan port forward std out") + } + if err = scanner.Err(); err != nil { + return nil, nil, err + } + output := scanner.Text() + + port := strings.TrimSuffix(strings.TrimPrefix(output, "Forwarding from 127.0.0.1:"), " -> 2379") + _, err = strconv.Atoi(port) + if err != nil { + return nil, nil, fmt.Errorf("port forward output not in expected format: %s", output) + } + + coreV1 := e.kubeClient.CoreV1() + etcdConfigMap, err := coreV1.ConfigMaps("openshift-config").Get("etcd-ca-bundle", metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + etcdSecret, err := coreV1.Secrets("openshift-config").Get("etcd-client", metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + + tlsConfig, err := restclient.TLSConfigFor(&restclient.Config{ + TLSClientConfig: restclient.TLSClientConfig{ + CertData: etcdSecret.Data[corev1.TLSCertKey], + KeyData: etcdSecret.Data[corev1.TLSPrivateKeyKey], + CAData: []byte(etcdConfigMap.Data["ca-bundle.crt"]), + }, + }) + if err != nil { + return nil, nil, err + } + + etcdClient3, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"https://127.0.0.1:" + port}, + DialTimeout: 30 * time.Second, + TLS: tlsConfig, + }) + if err != nil { + return nil, nil, err + } + + return etcdClient3.KV, done, nil +} diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/helpers.go b/vendor/github.com/openshift/library-go/test/library/encryption/helpers.go new file mode 100644 index 00000000000..41cbb76686e --- /dev/null +++ b/vendor/github.com/openshift/library-go/test/library/encryption/helpers.go @@ -0,0 +1,306 @@ +package encryption + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/util/retry" + + configv1 "github.com/openshift/api/config/v1" + configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + + "github.com/openshift/library-go/test/library" +) + +var ( + waitPollInterval = 15 * time.Second + waitPollTimeout = 60 * time.Minute // a happy path scenario needs to roll out 3 revisions each taking ~10 min + defaultEncryptionMode = string(configv1.EncryptionTypeIdentity) +) + +type ClientSet struct { + Etcd EtcdClient + ApiServerConfig configv1client.APIServerInterface + Kube kubernetes.Interface +} + +type EncryptionKeyMeta struct { + Name string + Migrated []schema.GroupResource + Mode string +} + +type UpdateUnsupportedConfigFunc func(raw []byte) error + +func SetAndWaitForEncryptionType(t testing.TB, encryptionType configv1.EncryptionType, 
defaultTargetGRs []schema.GroupResource, namespace, labelSelector string) ClientSet { + t.Helper() + t.Logf("Starting encryption e2e test for %q mode", encryptionType) + + clientSet := GetClients(t) + lastMigratedKeyMeta, err := GetLastKeyMeta(clientSet.Kube, namespace, labelSelector) + require.NoError(t, err) + + apiServer, err := clientSet.ApiServerConfig.Get("cluster", metav1.GetOptions{}) + require.NoError(t, err) + needsUpdate := apiServer.Spec.Encryption.Type != encryptionType + if needsUpdate { + t.Logf("Updating encryption type in the config file for APIServer to %q", encryptionType) + apiServer.Spec.Encryption.Type = encryptionType + _, err = clientSet.ApiServerConfig.Update(apiServer) + require.NoError(t, err) + } else { + t.Logf("APIServer is already configured to use %q mode", encryptionType) + } + + WaitForEncryptionKeyBasedOn(t, clientSet.Kube, lastMigratedKeyMeta, encryptionType, defaultTargetGRs, namespace, labelSelector) + return clientSet +} + +func GetClients(t testing.TB) ClientSet { + t.Helper() + + kubeConfig, err := library.NewClientConfigForTest() + require.NoError(t, err) + + configClient := configv1client.NewForConfigOrDie(kubeConfig) + apiServerConfigClient := configClient.APIServers() + + kubeClient := kubernetes.NewForConfigOrDie(kubeConfig) + etcdClient := NewEtcdClient(kubeClient) + + return ClientSet{Etcd: etcdClient, ApiServerConfig: apiServerConfigClient, Kube: kubeClient} +} + +func WaitForEncryptionKeyBasedOn(t testing.TB, kubeClient kubernetes.Interface, prevKeyMeta EncryptionKeyMeta, encryptionType configv1.EncryptionType, defaultTargetGRs []schema.GroupResource, namespace, labelSelector string) { + encryptionMode := string(encryptionType) + if encryptionMode == "" { + encryptionMode = defaultEncryptionMode + } + if len(prevKeyMeta.Name) == 0 { + prevKeyMeta.Mode = defaultEncryptionMode + } + + if prevKeyMeta.Mode == encryptionMode { + waitForNoNewEncryptionKey(t, kubeClient, prevKeyMeta, namespace, labelSelector) + return + } + WaitForNextMigratedKey(t, kubeClient, prevKeyMeta, defaultTargetGRs, namespace, labelSelector) +} + +func waitForNoNewEncryptionKey(t testing.TB, kubeClient kubernetes.Interface, prevKeyMeta EncryptionKeyMeta, namespace, labelSelector string) { + t.Helper() + // given that the happy path scenario needs ~30 min + // waiting 5 min to see if a new key hasn't been created seems to be enough. 
+	waitNoKeyPollInterval := 15 * time.Second
+	waitNoKeyPollTimeout := 6 * time.Minute
+	waitDuration := 5 * time.Minute
+
+	nextKeyName, err := determineNextEncryptionKeyName(prevKeyMeta.Name, labelSelector)
+	require.NoError(t, err)
+	t.Logf("Waiting up to %s to check that no new key %q will be created, as the previous (%q) key's encryption mode (%q) is the same as the current/desired one", waitDuration.String(), nextKeyName, prevKeyMeta.Name, prevKeyMeta.Mode)
+
+	observedTimestamp := time.Now()
+	if err := wait.Poll(waitNoKeyPollInterval, waitNoKeyPollTimeout, func() (bool, error) {
+		currentKeyMeta, err := GetLastKeyMeta(kubeClient, namespace, labelSelector)
+		if err != nil {
+			return false, err
+		}
+
+		if currentKeyMeta.Name != prevKeyMeta.Name {
+			return false, fmt.Errorf("unexpected key observed %q, expected no new key", currentKeyMeta.Name)
+		}
+
+		if time.Since(observedTimestamp) > waitDuration {
+			t.Logf("Haven't seen a new key for %s", waitDuration.String())
+			return true, nil
+		}
+
+		return false, nil
+	}); err != nil {
+		newErr := fmt.Errorf("failed to check that no new key will be created, err %v", err)
+		require.NoError(t, newErr)
+	}
+}
+
+func WaitForNextMigratedKey(t testing.TB, kubeClient kubernetes.Interface, prevKeyMeta EncryptionKeyMeta, defaultTargetGRs []schema.GroupResource, namespace, labelSelector string) {
+	t.Helper()
+
+	nextKeyName, err := determineNextEncryptionKeyName(prevKeyMeta.Name, labelSelector)
+	require.NoError(t, err)
+	if len(prevKeyMeta.Name) == 0 {
+		prevKeyMeta.Name = "no previous key"
+		prevKeyMeta.Migrated = defaultTargetGRs
+	}
+
+	t.Logf("Waiting up to %s for the next key %q, previous key was %q", waitPollTimeout.String(), nextKeyName, prevKeyMeta.Name)
+	observedKeyName := prevKeyMeta.Name
+	if err := wait.Poll(waitPollInterval, waitPollTimeout, func() (bool, error) {
+		currentKeyMeta, err := GetLastKeyMeta(kubeClient, namespace, labelSelector)
+		if err != nil {
+			return false, err
+		}
+
+		if currentKeyMeta.Name != observedKeyName {
+			if currentKeyMeta.Name != nextKeyName {
+				return false, fmt.Errorf("unexpected key observed %q, expected %q", currentKeyMeta.Name, nextKeyName)
+			}
+			t.Logf("Observed key %q, waiting up to %s until it will be used to migrate %v", currentKeyMeta.Name, waitPollTimeout.String(), prevKeyMeta.Migrated)
+			observedKeyName = currentKeyMeta.Name
+		}
+
+		if currentKeyMeta.Name == nextKeyName {
+			if len(prevKeyMeta.Migrated) == len(currentKeyMeta.Migrated) {
+				for _, expectedGR := range prevKeyMeta.Migrated {
+					if !hasResource(expectedGR, currentKeyMeta.Migrated) {
+						return false, nil
+					}
+				}
+				t.Logf("Key %q was used to migrate %v", currentKeyMeta.Name, currentKeyMeta.Migrated)
+				return true, nil
+			}
+		}
+		return false, nil
+	}); err != nil {
+		newErr := fmt.Errorf("failed waiting for key %s to be used to migrate %v, due to %v", nextKeyName, prevKeyMeta.Migrated, err)
+		require.NoError(t, newErr)
+	}
+}
+
+func GetLastKeyMeta(kubeClient kubernetes.Interface, namespace, labelSelector string) (EncryptionKeyMeta, error) {
+	secretsClient := kubeClient.CoreV1().Secrets(namespace)
+	var selectedSecrets *corev1.SecretList
+	err := onErrorWithTimeout(wait.ForeverTestTimeout, retry.DefaultBackoff, transientAPIError, func() (err error) {
+		selectedSecrets, err = secretsClient.List(metav1.ListOptions{LabelSelector: labelSelector})
+		return
+	})
+	if err != nil {
+		return EncryptionKeyMeta{}, err
+	}
+
+	if len(selectedSecrets.Items) == 0 {
+		return EncryptionKeyMeta{}, nil
+	}
+	encryptionSecrets := make([]*corev1.Secret, 0, len(selectedSecrets.Items))
+	for _, s := range selectedSecrets.Items {
+		encryptionSecrets = append(encryptionSecrets, s.DeepCopy())
+	}
+	sort.Slice(encryptionSecrets, func(i, j int) bool {
+		iKeyID, _ := encryptionKeyNameToKeyID(encryptionSecrets[i].Name)
+		jKeyID, _ := encryptionKeyNameToKeyID(encryptionSecrets[j].Name)
+		return iKeyID > jKeyID
+	})
+	lastKey := encryptionSecrets[0]
+
+	type migratedGroupResources struct {
+		Resources []schema.GroupResource `json:"resources"`
+	}
+
+	migrated := &migratedGroupResources{}
+	if v, ok := lastKey.Annotations["encryption.apiserver.operator.openshift.io/migrated-resources"]; ok && len(v) > 0 {
+		if err := json.Unmarshal([]byte(v), migrated); err != nil {
+			return EncryptionKeyMeta{}, err
+		}
+	}
+	mode := lastKey.Annotations["encryption.apiserver.operator.openshift.io/mode"]
+
+	return EncryptionKeyMeta{Name: lastKey.Name, Migrated: migrated.Resources, Mode: mode}, nil
+}
+
+func ForceKeyRotation(t testing.TB, updateUnsupportedConfig UpdateUnsupportedConfigFunc, reason string) error {
+	t.Logf("Forcing a new key rotation, reason %q", reason)
+	data := map[string]map[string]string{
+		"encryption": {
+			"reason": reason,
+		},
+	}
+	raw, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+
+	return onErrorWithTimeout(wait.ForeverTestTimeout, retry.DefaultBackoff, orError(errors.IsConflict, transientAPIError), func() error {
+		return updateUnsupportedConfig(raw)
+	})
+}
+
+// hasResource returns whether the given group resource is contained in the migrated group resource list.
+func hasResource(expectedResource schema.GroupResource, actualResources []schema.GroupResource) bool {
+	for _, gr := range actualResources {
+		if gr == expectedResource {
+			return true
+		}
+	}
+	return false
+}
+
+func encryptionKeyNameToKeyID(name string) (uint64, bool) {
+	lastIdx := strings.LastIndex(name, "-")
+	idString := name
+	if lastIdx >= 0 {
+		idString = name[lastIdx+1:] // this can never overflow since str[-1+1:] is valid
+	}
+	id, err := strconv.ParseUint(idString, 10, 0)
+	return id, err == nil
+}
+
+func determineNextEncryptionKeyName(prevKeyName, labelSelector string) (string, error) {
+	if len(prevKeyName) > 0 {
+		prevKeyID, prevKeyValid := encryptionKeyNameToKeyID(prevKeyName)
+		if !prevKeyValid {
+			return "", fmt.Errorf("invalid key %q passed", prevKeyName)
+		}
+		nextKeyID := prevKeyID + 1
+		return strings.Replace(prevKeyName, fmt.Sprintf("%d", prevKeyID), fmt.Sprintf("%d", nextKeyID), 1), nil
+	}
+
+	ret := strings.Split(labelSelector, "=")
+	if len(ret) != 2 {
+		return "", fmt.Errorf("unable to read the component name from the label selector, wrong format of the selector, expected \"...openshift.io/component=name\", got %s", labelSelector)
+	}
+
+	// no encryption key yet - the first one will look like the following
+	return fmt.Sprintf("encryption-key-%s-1", ret[1]), nil
+}
+
+func setUpTearDown(namespace string) func(testing.TB, bool) {
+	return func(t testing.TB, failed bool) {
+		if failed { // we don't use t.Failed() because we handle termination differently when running on a local machine
+			t.Logf("Tearing Down %s", t.Name())
+			eventsToPrint := 20
+			clientSet := GetClients(t)
+
+			eventList, err := clientSet.Kube.CoreV1().Events(namespace).List(metav1.ListOptions{})
+			require.NoError(t, err)
+
+			sort.Slice(eventList.Items, func(i, j int) bool {
+				first := eventList.Items[i]
+				second := eventList.Items[j]
+				return first.LastTimestamp.After(second.LastTimestamp.Time)
+			})
+
+			t.Logf("Dumping %d events from %q namespace",
eventsToPrint, namespace) + now := time.Now() + if len(eventList.Items) > eventsToPrint { + eventList.Items = eventList.Items[:eventsToPrint] + } + for _, ev := range eventList.Items { + t.Logf("Last seen: %-15v Type: %-10v Reason: %-40v Source: %-55v Message: %v", now.Sub(ev.LastTimestamp.Time), ev.Type, ev.Reason, ev.Source.Component, ev.Message) + } + } + } +} diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/perf_helpers.go b/vendor/github.com/openshift/library-go/test/library/encryption/perf_helpers.go new file mode 100644 index 00000000000..2cb915ee932 --- /dev/null +++ b/vendor/github.com/openshift/library-go/test/library/encryption/perf_helpers.go @@ -0,0 +1,192 @@ +package encryption + +import ( + "fmt" + "sync" + "testing" + "time" + + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + + operatorv1 "github.com/openshift/api/operator/v1" + test "github.com/openshift/library-go/test/library" +) + +func watchForMigrationControllerProgressingConditionAsync(t testing.TB, getOperatorCondFn GetOperatorConditionsFuncType, migrationStartedCh chan time.Time) { + t.Helper() + go watchForMigrationControllerProgressingCondition(t, getOperatorCondFn, migrationStartedCh) +} + +func watchForMigrationControllerProgressingCondition(t testing.TB, getOperatorConditionsFn GetOperatorConditionsFuncType, migrationStartedCh chan time.Time) { + t.Helper() + + t.Logf("Waiting up to %s for the condition %q with the reason %q to be set to true", waitPollTimeout.String(), "EncryptionMigrationControllerProgressing", "Migrating") + err := wait.Poll(waitPollInterval, waitPollTimeout, func() (bool, error) { + conditions, err := getOperatorConditionsFn(t) + if err != nil { + return false, err + } + for _, cond := range conditions { + if cond.Type == "EncryptionMigrationControllerProgressing" && cond.Status == operatorv1.ConditionTrue { + t.Logf("EncryptionMigrationControllerProgressing condition observed at %v", cond.LastTransitionTime) + migrationStartedCh <- cond.LastTransitionTime.Time + return true, nil + } + } + return false, nil + }) + if err != nil { + t.Logf("failed waiting for the condition %q with the reason %q to be set to true", "EncryptionMigrationControllerProgressing", "Migrating") + } +} + +func populateDatabase(t testing.TB, workers int, dbLoaderFun DBLoaderFuncType, assertDBPopulatedFunc func(t testing.TB, errorStore map[string]int, statStore map[string]int)) { + t.Helper() + start := time.Now() + defer func() { + end := time.Now() + t.Logf("Populating etcd took %v", end.Sub(start)) + }() + + r := newRunner() + + // run executes loaderFunc for each worker + r.run(t, workers, dbLoaderFun) + + assertDBPopulatedFunc(t, r.errorStore, r.statsStore) +} + +type DBLoaderFuncType func(kubernetes.Interface, string, func(error) /*error collector*/, func(string) /*stats collector*/) error + +type runner struct { + errorStore map[string]int + lock *sync.Mutex + + statsStore map[string]int + lockStats *sync.Mutex + wg *sync.WaitGroup +} + +func newRunner() *runner { + r := &runner{} + + r.errorStore = map[string]int{} + r.lock = &sync.Mutex{} + r.statsStore = map[string]int{} + r.lockStats = &sync.Mutex{} + + r.wg = &sync.WaitGroup{} + + return r +} + +func (r *runner) run(t testing.TB, workers int, workFunc ...DBLoaderFuncType) { + t.Logf("Executing provided load function for %d workers", workers) + for i := 0; i < workers; i++ { + wrapper := func(wg *sync.WaitGroup) { + defer wg.Done() + kubeClient, err := newKubeClient(300, 
600)
+			if err != nil {
+				t.Errorf("Unable to create a kube client for a worker due to %v", err)
+				r.collectError(err)
+				return
+			}
+			_ = runWorkFunctions(kubeClient, "", r.collectError, r.collectStat, workFunc...)
+		}
+		r.wg.Add(1)
+		go wrapper(r.wg)
+	}
+	r.wg.Wait()
+	t.Log("All workers completed")
+}
+
+func (r *runner) collectError(err error) {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	r.errorStore[err.Error()]++
+}
+
+func (r *runner) collectStat(stat string) {
+	r.lockStats.Lock()
+	defer r.lockStats.Unlock()
+	r.statsStore[stat]++
+}
+
+func runWorkFunctions(kubeClient kubernetes.Interface, namespace string, errorCollector func(error), statsCollector func(string), workFunc ...DBLoaderFuncType) error {
+	if len(namespace) == 0 {
+		namespace = createNamespaceName()
+	}
+	for _, work := range workFunc {
+		err := work(kubeClient, namespace, errorCollector, statsCollector)
+		if err != nil {
+			errorCollector(err)
+			return err
+		}
+	}
+	return nil
+}
+
+func DBLoaderRepeat(times int, genNamespaceName bool, workToRepeatFunc ...DBLoaderFuncType) DBLoaderFuncType {
+	return DBLoaderRepeatParallel(times, 1, genNamespaceName, workToRepeatFunc...)
+}
+
+func DBLoaderRepeatParallel(times int, workers int, genNamespaceName bool, workToRepeatFunc ...DBLoaderFuncType) DBLoaderFuncType {
+	return func(kubeClient kubernetes.Interface, namespace string, errorCollector func(error), statsCollector func(string)) error {
+		if times < workers {
+			panic("DBLoaderRepeatParallel: times cannot be smaller than workers")
+		}
+		wg := sync.WaitGroup{}
+		workPerWorker := times / workers // note: any remainder is dropped
+		for w := 0; w < workers; w++ {
+			work := func() {
+				defer wg.Done()
+				for i := 0; i < workPerWorker; i++ {
+					if genNamespaceName {
+						namespace = createNamespaceName()
+					}
+					if err := runWorkFunctions(kubeClient, namespace, errorCollector, statsCollector, workToRepeatFunc...); err != nil {
+						errorCollector(err)
+					}
+				}
+			}
+			wg.Add(1)
+			go work()
+		}
+		wg.Wait()
+		return nil
+	}
+}
+
+func createNamespaceName() string {
+	return fmt.Sprintf("encryption-%s", rand.String(10))
+}
+
+func newKubeClient(qps float32, burst int) (kubernetes.Interface, error) {
+	kubeConfig, err := test.NewClientConfigForTest()
+	if err != nil {
+		return nil, err
+	}
+
+	kubeConfig.QPS = qps
+	kubeConfig.Burst = burst
+
+	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
+	if err != nil {
+		return nil, err
+	}
+	return kubeClient, nil
+}
diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/perf_scenarios.go b/vendor/github.com/openshift/library-go/test/library/encryption/perf_scenarios.go
new file mode 100644
index 00000000000..4f16e785474
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/library/encryption/perf_scenarios.go
@@ -0,0 +1,57 @@
+package encryption
+
+import (
+	"testing"
+	"time"
+
+	configv1 "github.com/openshift/api/config/v1"
+	v1 "github.com/openshift/api/operator/v1"
+)
+
+type GetOperatorConditionsFuncType func(t testing.TB) ([]v1.OperatorCondition, error)
+
+type PerfScenario struct {
+	BasicScenario
+	GetOperatorConditionsFunc GetOperatorConditionsFuncType
+
+	DBLoaderFunc          DBLoaderFuncType
+	AssertDBPopulatedFunc func(t testing.TB, errorStore map[string]int, statStore map[string]int)
+	AssertMigrationTime   func(t testing.TB, migrationTime time.Duration)
+	// DBLoaderWorkers
is the number of workers that will execute DBLoaderFunc + DBLoaderWorkers int +} + +func TestPerfEncryptionTypeAESCBC(t *testing.T, scenario PerfScenario) { + e := NewE(t, PrintEventsOnFailure(scenario.OperatorNamespace)) + migrationStartedCh := make(chan time.Time, 1) + + populateDatabase(e, scenario.DBLoaderWorkers, scenario.DBLoaderFunc, scenario.AssertDBPopulatedFunc) + watchForMigrationControllerProgressingConditionAsync(e, scenario.GetOperatorConditionsFunc, migrationStartedCh) + endTimeStamp := runTestEncryptionTypeAESCBCScenario(t, scenario.BasicScenario) + + select { + case migrationStarted := <-migrationStartedCh: + scenario.AssertMigrationTime(e, endTimeStamp.Sub(migrationStarted)) + default: + e.Error("unable to calculate the migration time, failed to observe when the migration has started") + } +} + +func runTestEncryptionTypeAESCBCScenario(tt *testing.T, scenario BasicScenario) time.Time { + var ts time.Time + TestEncryptionTypeAESCBC(tt, BasicScenario{ + Namespace: scenario.Namespace, + LabelSelector: scenario.LabelSelector, + EncryptionConfigSecretName: scenario.EncryptionConfigSecretName, + EncryptionConfigSecretNamespace: scenario.EncryptionConfigSecretNamespace, + OperatorNamespace: scenario.OperatorNamespace, + TargetGRs: scenario.TargetGRs, + AssertFunc: func(t testing.TB, clientSet ClientSet, expectedMode configv1.EncryptionType, namespace, labelSelector string) { + // Note that AssertFunc is executed after an encryption secret has been annotated + ts = time.Now() + scenario.AssertFunc(t, clientSet, expectedMode, scenario.Namespace, scenario.LabelSelector) + t.Logf("AssertFunc for TestEncryptionTypeAESCBC scenario took %v", time.Now().Sub(ts)) + }, + }) + return ts +} diff --git a/vendor/github.com/openshift/library-go/test/library/encryption/scenarios.go b/vendor/github.com/openshift/library-go/test/library/encryption/scenarios.go new file mode 100644 index 00000000000..9a92c985527 --- /dev/null +++ b/vendor/github.com/openshift/library-go/test/library/encryption/scenarios.go @@ -0,0 +1,133 @@ +package encryption + +import ( + "fmt" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + + configv1 "github.com/openshift/api/config/v1" +) + +type BasicScenario struct { + Namespace string + LabelSelector string + EncryptionConfigSecretName string + EncryptionConfigSecretNamespace string + OperatorNamespace string + TargetGRs []schema.GroupResource + AssertFunc func(t testing.TB, clientSet ClientSet, expectedMode configv1.EncryptionType, namespace, labelSelector string) +} + +func TestEncryptionTypeIdentity(t *testing.T, scenario BasicScenario) { + e := NewE(t, PrintEventsOnFailure(scenario.OperatorNamespace)) + clientSet := SetAndWaitForEncryptionType(e, configv1.EncryptionTypeIdentity, scenario.TargetGRs, scenario.Namespace, scenario.LabelSelector) + scenario.AssertFunc(e, clientSet, configv1.EncryptionTypeIdentity, scenario.Namespace, scenario.LabelSelector) +} + +func TestEncryptionTypeUnset(t *testing.T, scenario BasicScenario) { + e := NewE(t, PrintEventsOnFailure(scenario.OperatorNamespace)) + clientSet := SetAndWaitForEncryptionType(e, "", scenario.TargetGRs, scenario.Namespace, scenario.LabelSelector) + scenario.AssertFunc(e, clientSet, configv1.EncryptionTypeIdentity, scenario.Namespace, scenario.LabelSelector) +} + +func TestEncryptionTypeAESCBC(t *testing.T, scenario BasicScenario) { + e := NewE(t, 
PrintEventsOnFailure(scenario.OperatorNamespace))
+	clientSet := SetAndWaitForEncryptionType(e, configv1.EncryptionTypeAESCBC, scenario.TargetGRs, scenario.Namespace, scenario.LabelSelector)
+	scenario.AssertFunc(e, clientSet, configv1.EncryptionTypeAESCBC, scenario.Namespace, scenario.LabelSelector)
+	AssertEncryptionConfig(e, clientSet, scenario.EncryptionConfigSecretName, scenario.EncryptionConfigSecretNamespace, scenario.TargetGRs)
+}
+
+type OnOffScenario struct {
+	BasicScenario
+	CreateResourceFunc             func(t testing.TB, clientSet ClientSet, namespace string) runtime.Object
+	AssertResourceEncryptedFunc    func(t testing.TB, clientSet ClientSet, resource runtime.Object)
+	AssertResourceNotEncryptedFunc func(t testing.TB, clientSet ClientSet, resource runtime.Object)
+	ResourceFunc                   func(t testing.TB, namespace string) runtime.Object
+	ResourceName                   string
+}
+
+func TestEncryptionTurnOnAndOff(t *testing.T, scenario OnOffScenario) {
+	scenarios := []struct {
+		name     string
+		testFunc func(*testing.T)
+	}{
+		{name: fmt.Sprintf("CreateAndStore%s", scenario.ResourceName), testFunc: func(t *testing.T) {
+			e := NewE(t)
+			scenario.CreateResourceFunc(e, GetClients(e), scenario.Namespace)
+		}},
+		{name: "OnAESCBC", testFunc: func(t *testing.T) { TestEncryptionTypeAESCBC(t, scenario.BasicScenario) }},
+		{name: fmt.Sprintf("Assert%sEncrypted", scenario.ResourceName), testFunc: func(t *testing.T) {
+			e := NewE(t)
+			scenario.AssertResourceEncryptedFunc(e, GetClients(e), scenario.ResourceFunc(e, scenario.Namespace))
+		}},
+		{name: "OffIdentity", testFunc: func(t *testing.T) { TestEncryptionTypeIdentity(t, scenario.BasicScenario) }},
+		{name: fmt.Sprintf("Assert%sNotEncrypted", scenario.ResourceName), testFunc: func(t *testing.T) {
+			e := NewE(t)
+			scenario.AssertResourceNotEncryptedFunc(e, GetClients(e), scenario.ResourceFunc(e, scenario.Namespace))
+		}},
+		{name: "OnAESCBCSecond", testFunc: func(t *testing.T) { TestEncryptionTypeAESCBC(t, scenario.BasicScenario) }},
+		{name: fmt.Sprintf("Assert%sEncryptedSecond", scenario.ResourceName), testFunc: func(t *testing.T) {
+			e := NewE(t)
+			scenario.AssertResourceEncryptedFunc(e, GetClients(e), scenario.ResourceFunc(e, scenario.Namespace))
+		}},
+		{name: "OffIdentitySecond", testFunc: func(t *testing.T) { TestEncryptionTypeIdentity(t, scenario.BasicScenario) }},
+		{name: fmt.Sprintf("Assert%sNotEncryptedSecond", scenario.ResourceName), testFunc: func(t *testing.T) {
+			e := NewE(t)
+			scenario.AssertResourceNotEncryptedFunc(e, GetClients(e), scenario.ResourceFunc(e, scenario.Namespace))
+		}},
+	}
+
+	// run scenarios
+	for _, testScenario := range scenarios {
+		t.Run(testScenario.name, testScenario.testFunc)
+		if t.Failed() {
+			t.Errorf("stopping the test as %q scenario failed", testScenario.name)
+			return
+		}
+	}
+}
+
+type RotationScenario struct {
+	BasicScenario
+	CreateResourceFunc    func(t testing.TB, clientSet ClientSet, namespace string) runtime.Object
+	GetRawResourceFunc    func(t testing.TB, clientSet ClientSet, namespace string) string
+	UnsupportedConfigFunc UpdateUnsupportedConfigFunc
+}
+
+// TestEncryptionRotation first encrypts data with an aescbc key,
+// then forces a key rotation by setting "encryption.reason" in the operator's configuration file
+func TestEncryptionRotation(t *testing.T, scenario RotationScenario) {
+	// test data
+	ns := scenario.Namespace
+	labelSelector := scenario.LabelSelector
+
+	// step 1: create the desired resource
+	e := NewE(t)
+	clientSet := GetClients(e)
+	scenario.CreateResourceFunc(e, GetClients(e), ns)
+
+	//
step 2: run the encryption aescbc scenario
+	TestEncryptionTypeAESCBC(t, scenario.BasicScenario)
+
+	// step 3: take samples
+	rawEncryptedResourceWithKey1 := scenario.GetRawResourceFunc(e, clientSet, ns)
+
+	// step 4: force key rotation and wait for migration to complete
+	lastMigratedKeyMeta, err := GetLastKeyMeta(clientSet.Kube, ns, labelSelector)
+	require.NoError(e, err)
+	require.NoError(e, ForceKeyRotation(e, scenario.UnsupportedConfigFunc, fmt.Sprintf("test-key-rotation-%s", rand.String(4))))
+	WaitForNextMigratedKey(e, clientSet.Kube, lastMigratedKeyMeta, scenario.TargetGRs, ns, labelSelector)
+	scenario.AssertFunc(e, clientSet, configv1.EncryptionTypeAESCBC, ns, labelSelector)
+
+	// step 5: verify that the provided resource was encrypted with a different key (step 2 vs step 4)
+	rawEncryptedResourceWithKey2 := scenario.GetRawResourceFunc(e, clientSet, ns)
+	if rawEncryptedResourceWithKey1 == rawEncryptedResourceWithKey2 {
+		t.Errorf("expected the resource to have different content after a key rotation,\ncontentBeforeRotation %s\ncontentAfterRotation %s", rawEncryptedResourceWithKey1, rawEncryptedResourceWithKey2)
+	}
+
+	// TODO: assert conditions - operator and encryption migration controller must report status as active not progressing, and not failing for all scenarios
+}
diff --git a/vendor/github.com/openshift/library-go/test/library/library.go b/vendor/github.com/openshift/library-go/test/library/library.go
new file mode 100644
index 00000000000..e09acc64379
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/test/library/library.go
@@ -0,0 +1,34 @@
+package library
+
+import (
+	"crypto/rand"
+	"fmt"
+	"math"
+	"math/big"
+	"regexp"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	WaitPollInterval = time.Second
+	WaitPollTimeout  = 10 * time.Minute
+)
+
+// GenerateNameForTest generates a name of the form `prefix + test name + random string` that
+// can be used as a resource name. Convert the result to lowercase to use it as a dns label.
+func GenerateNameForTest(t *testing.T, prefix string) string {
+	n, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
+	require.NoError(t, err)
+	name := []byte(fmt.Sprintf("%s%s-%016x", prefix, t.Name(), n.Int64()))
+	// make the name (almost) suitable for use as a dns label
+	// only a-z, 0-9, and '-' allowed
+	name = regexp.MustCompile("[^a-zA-Z0-9]+").ReplaceAll(name, []byte("-"))
+	// collapse multiple `-`
+	name = regexp.MustCompile("-+").ReplaceAll(name, []byte("-"))
+	// ensure no `-` at beginning or end
+	return strings.Trim(string(name), "-")
+}
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
index f8a63b308ba..00d04cb9b02 100644
--- a/vendor/github.com/spf13/pflag/.travis.yml
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -3,8 +3,9 @@ sudo: false
 language: go
 
 go:
-  - 1.7.3
-  - 1.8.1
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
   - tip
 
 matrix:
@@ -12,7 +13,7 @@ matrix:
     - go: tip
 
 install:
-  - go get github.com/golang/lint/golint
+  - go get golang.org/x/lint/golint
  - export PATH=$GOPATH/bin:$PATH
  - go install ./...
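The `GenerateNameForTest` helper vendored above gives each test run a unique, DNS-label-safe resource name derived from the running test. A minimal usage sketch follows; the test function and the `e2e-` prefix are hypothetical, only `GenerateNameForTest` and its import path come from the vendored library:

```go
package library_test

import (
	"strings"
	"testing"

	test "github.com/openshift/library-go/test/library"
)

// TestExample (hypothetical) derives a namespace name from the test's own name.
func TestExample(t *testing.T) {
	// lowercase the result because Kubernetes DNS labels reject uppercase,
	// which t.Name() typically contains
	name := strings.ToLower(test.GenerateNameForTest(t, "e2e-"))
	// e.g. "e2e-testexample-1a2b3c4d5e6f7a8b": only [a-z0-9-], no leading/trailing '-'
	t.Log(name)
}
```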
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md index b052414d129..7eacc5bdbe5 100644 --- a/vendor/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -86,8 +86,8 @@ fmt.Println("ip has value ", *ip) fmt.Println("flagvar has value ", flagvar) ``` -There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the flag pointers in your code. +There are helper functions available to get the value stored in a Flag if you have a FlagSet but find +it difficult to keep up with all of the pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go index 5af02f1a75a..3731370d6a5 100644 --- a/vendor/github.com/spf13/pflag/bool_slice.go +++ b/vendor/github.com/spf13/pflag/bool_slice.go @@ -71,6 +71,44 @@ func (s *boolSliceValue) String() string { return "[" + out + "]" } +func (s *boolSliceValue) fromString(val string) (bool, error) { + return strconv.ParseBool(val) +} + +func (s *boolSliceValue) toString(val bool) string { + return strconv.FormatBool(val) +} + +func (s *boolSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *boolSliceValue) Replace(val []string) error { + out := make([]bool, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *boolSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func boolSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/bool_slice_test.go b/vendor/github.com/spf13/pflag/bool_slice_test.go index b617dd237f3..3c5a274f07d 100644 --- a/vendor/github.com/spf13/pflag/bool_slice_test.go +++ b/vendor/github.com/spf13/pflag/bool_slice_test.go @@ -160,6 +160,29 @@ func TestBSCalledTwice(t *testing.T) { } } +func TestBSAsSliceValue(t *testing.T) { + var bs []bool + f := setUpBSFlagSet(&bs) + + in := []string{"true", "false"} + argfmt := "--bs=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"false"}) + } + }) + if len(bs) != 1 || bs[0] != false { + t.Fatalf("Expected ss to be overwritten with 'false', but got: %v", bs) + } +} + func TestBSBadQuoting(t *testing.T) { tests := []struct { diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index aa126e44d1c..a0b2679f71c 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -46,7 +46,7 @@ func (f *FlagSet) GetCount(name string) (int, error) { // CountVar defines a count flag with specified name, default value, and usage string. // The argument p points to an int variable in which to store the value of the flag. 
-// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) CountVar(p *int, name string, usage string) { f.CountVarP(p, name, "", usage) } @@ -69,7 +69,7 @@ func CountVarP(p *int, name, shorthand string, usage string) { // Count defines a count flag with specified name, default value, and usage string. // The return value is the address of an int variable that stores the value of the flag. -// A count flag will add 1 to its value evey time it is found on the command line +// A count flag will add 1 to its value every time it is found on the command line func (f *FlagSet) Count(name string, usage string) *int { p := new(int) f.CountVarP(p, name, "", usage) diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go index 52c6b6dc104..badadda53fd 100644 --- a/vendor/github.com/spf13/pflag/duration_slice.go +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -51,6 +51,44 @@ func (s *durationSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *durationSliceValue) fromString(val string) (time.Duration, error) { + return time.ParseDuration(val) +} + +func (s *durationSliceValue) toString(val time.Duration) string { + return fmt.Sprintf("%s", val) +} + +func (s *durationSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *durationSliceValue) Replace(val []string) error { + out := make([]time.Duration, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *durationSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func durationSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/duration_slice_test.go b/vendor/github.com/spf13/pflag/duration_slice_test.go index 489b012ffda..651fbd8b5b3 100644 --- a/vendor/github.com/spf13/pflag/duration_slice_test.go +++ b/vendor/github.com/spf13/pflag/duration_slice_test.go @@ -144,6 +144,29 @@ func TestDSWithDefault(t *testing.T) { } } +func TestDSAsSliceValue(t *testing.T) { + var ds []time.Duration + f := setUpDSFlagSet(&ds) + + in := []string{"1ns", "2ns"} + argfmt := "--ds=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3ns"}) + } + }) + if len(ds) != 1 || ds[0] != time.Duration(3) { + t.Fatalf("Expected ss to be overwritten with '3ns', but got: %v", ds) + } +} + func TestDSCalledTwice(t *testing.T) { var ds []time.Duration f := setUpDSFlagSet(&ds) diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 9beeda8ecca..24a5036e95b 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -57,9 +57,9 @@ that give one-letter shorthands for flags. 
You can use these by appending var ip = flag.IntP("flagname", "f", 1234, "help message") var flagvar bool func init() { - flag.BoolVarP("boolname", "b", true, "help message") + flag.BoolVarP(&flagvar, "boolname", "b", true, "help message") } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") + flag.VarP(&flagval, "varname", "v", "help message") Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags. @@ -190,6 +190,18 @@ type Value interface { Type() string } +// SliceValue is a secondary interface to all flags which hold a list +// of values. This allows full control over the value of list flags, +// and avoids complicated marshalling and unmarshalling to csv. +type SliceValue interface { + // Append adds the specified value to the end of the flag value list. + Append(string) error + // Replace will fully overwrite any data currently in the flag value list. + Replace([]string) error + // GetSlice returns the flag value list as an array of strings. + GetSlice() []string +} + // sortFlags returns the flags as a slice in lexicographical sorted order. func sortFlags(flags map[NormalizedName]*Flag) []*Flag { list := make(sort.StringSlice, len(flags)) diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go new file mode 100644 index 00000000000..caa352741a6 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float32Slice Value +type float32SliceValue struct { + value *[]float32 + changed bool +} + +func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue { + isv := new(float32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return err + } + out[i] = float32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
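+		// on the first Set call (changed is still false) the default slice is
+		// replaced; afterwards values are appended, so repeated occurrences such
+		// as "--f32s=1.0 --f32s=2.0" accumulate into a single slice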
+ } + s.changed = true + return nil +} + +func (s *float32SliceValue) Type() string { + return "float32Slice" +} + +func (s *float32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float32SliceValue) fromString(val string) (float32, error) { + t64, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(t64), nil +} + +func (s *float32SliceValue) toString(val float32) string { + return fmt.Sprintf("%f", val) +} + +func (s *float32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float32SliceValue) Replace(val []string) error { + out := make([]float32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float32{}, nil + } + ss := strings.Split(val, ",") + out := make([]float32, len(ss)) + for i, d := range ss { + var err error + var temp64 float64 + temp64, err = strconv.ParseFloat(d, 32) + if err != nil { + return nil, err + } + out[i] = float32(temp64) + + } + return out, nil +} + +// GetFloat32Slice return the []float32 value of a flag with the given name +func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) { + val, err := f.getFlagType(name, "float32Slice", float32SliceConv) + if err != nil { + return []float32{}, err + } + return val.([]float32), nil +} + +// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string. +// The argument p points to a []float32 variable in which to store the value of the flag. +func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32SliceVar defines a float32[] flag with specified name, default value, and usage string. +// The argument p points to a float32[] variable in which to store the value of the flag. +func Float32SliceVar(p *[]float32, name string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage) +} + +// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) { + CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage) +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. 
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + p := []float32{} + f.Float32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float32Slice defines a []float32 flag with specified name, default value, and usage string. +// The return value is the address of a []float32 variable that stores the value of the flag. +func Float32Slice(name string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, "", value, usage) +} + +// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash. +func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 { + return CommandLine.Float32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float32_slice_test.go b/vendor/github.com/spf13/pflag/float32_slice_test.go new file mode 100644 index 00000000000..997ce5c686a --- /dev/null +++ b/vendor/github.com/spf13/pflag/float32_slice_test.go @@ -0,0 +1,200 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpF32SFlagSet(f32sp *[]float32) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Float32SliceVar(f32sp, "f32s", []float32{}, "Command separated list!") + return f +} + +func setUpF32SFlagSetWithDefault(f32sp *[]float32) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Float32SliceVar(f32sp, "f32s", []float32{0.0, 1.0}, "Command separated list!") + return f +} + +func TestEmptyF32S(t *testing.T) { + var f32s []float32 + f := setUpF32SFlagSet(&f32s) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getF32S, err := f.GetFloat32Slice("f32s") + if err != nil { + t.Fatal("got an error from GetFloat32Slice():", err) + } + if len(getF32S) != 0 { + t.Fatalf("got f32s %v with len=%d but expected length=0", getF32S, len(getF32S)) + } +} + +func TestF32S(t *testing.T) { + var f32s []float32 + f := setUpF32SFlagSet(&f32s) + + vals := []string{"1.0", "2.0", "4.0", "3.0"} + arg := fmt.Sprintf("--f32s=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f32s { + d64, err := strconv.ParseFloat(vals[i], 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + + d := float32(d64) + if d != v { + t.Fatalf("expected f32s[%d] to be %s but got: %f", i, vals[i], v) + } + } + getF32S, err := f.GetFloat32Slice("f32s") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getF32S { + d64, err := strconv.ParseFloat(vals[i], 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + + d := float32(d64) + if d != v { + t.Fatalf("expected f32s[%d] to be %s but got: %f from GetFloat32Slice", i, vals[i], v) + } + } +} + +func TestF32SDefault(t *testing.T) { + var f32s []float32 + f := setUpF32SFlagSetWithDefault(&f32s) + + vals := []string{"0.0", "1.0"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f32s { + d64, err := 
strconv.ParseFloat(vals[i], 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + + d := float32(d64) + if d != v { + t.Fatalf("expected f32s[%d] to be %f but got: %f", i, d, v) + } + } + + getF32S, err := f.GetFloat32Slice("f32s") + if err != nil { + t.Fatal("got an error from GetFloat32Slice():", err) + } + for i, v := range getF32S { + d64, err := strconv.ParseFloat(vals[i], 32) + if err != nil { + t.Fatal("got an error from GetFloat32Slice():", err) + } + + d := float32(d64) + if d != v { + t.Fatalf("expected f32s[%d] to be %f from GetFloat32Slice but got: %f", i, d, v) + } + } +} + +func TestF32SWithDefault(t *testing.T) { + var f32s []float32 + f := setUpF32SFlagSetWithDefault(&f32s) + + vals := []string{"1.0", "2.0"} + arg := fmt.Sprintf("--f32s=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f32s { + d64, err := strconv.ParseFloat(vals[i], 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + + d := float32(d64) + if d != v { + t.Fatalf("expected f32s[%d] to be %f but got: %f", i, d, v) + } + } + + getF32S, err := f.GetFloat32Slice("f32s") + if err != nil { + t.Fatal("got an error from GetFloat32Slice():", err) + } + for i, v := range getF32S { + d64, err := strconv.ParseFloat(vals[i], 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + + d := float32(d64) + if d != v { + t.Fatalf("expected f32s[%d] to be %f from GetFloat32Slice but got: %f", i, d, v) + } + } +} + +func TestF32SAsSliceValue(t *testing.T) { + var f32s []float32 + f := setUpF32SFlagSet(&f32s) + + in := []string{"1.0", "2.0"} + argfmt := "--f32s=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3.1"}) + } + }) + if len(f32s) != 1 || f32s[0] != 3.1 { + t.Fatalf("Expected ss to be overwritten with '3.1', but got: %v", f32s) + } +} + +func TestF32SCalledTwice(t *testing.T) { + var f32s []float32 + f := setUpF32SFlagSet(&f32s) + + in := []string{"1.0,2.0", "3.0"} + expected := []float32{1.0, 2.0, 3.0} + argfmt := "--f32s=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f32s { + if expected[i] != v { + t.Fatalf("expected f32s[%d] to be %f but got: %f", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go new file mode 100644 index 00000000000..85bf3073d50 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- float64Slice Value +type float64SliceValue struct { + value *[]float64 + changed bool +} + +func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue { + isv := new(float64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *float64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *float64SliceValue) Type() string { + return "float64Slice" +} + +func (s *float64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%f", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *float64SliceValue) fromString(val string) (float64, error) { + return strconv.ParseFloat(val, 64) +} + +func (s *float64SliceValue) toString(val float64) string { + return fmt.Sprintf("%f", val) +} + +func (s *float64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *float64SliceValue) Replace(val []string) error { + out := make([]float64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *float64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func float64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []float64{}, nil + } + ss := strings.Split(val, ",") + out := make([]float64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseFloat(d, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetFloat64Slice return the []float64 value of a flag with the given name +func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) { + val, err := f.getFlagType(name, "float64Slice", float64SliceConv) + if err != nil { + return []float64{}, err + } + return val.([]float64), nil +} + +// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string. +// The argument p points to a []float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64SliceVar defines a float64[] flag with specified name, default value, and usage string. +// The argument p points to a float64[] variable in which to store the value of the flag. +func Float64SliceVar(p *[]float64, name string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage) +} + +// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) { + CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage) +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. 
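+// For example (hypothetical flag name): f.Float64Slice("weights", nil, "list of weights")
+// parsed against --weights=0.5,1.5 stores [0.5 1.5]; GetFloat64Slice("weights") then returns the same values.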
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + p := []float64{} + f.Float64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Float64Slice defines a []float64 flag with specified name, default value, and usage string. +// The return value is the address of a []float64 variable that stores the value of the flag. +func Float64Slice(name string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, "", value, usage) +} + +// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash. +func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 { + return CommandLine.Float64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/float64_slice_test.go b/vendor/github.com/spf13/pflag/float64_slice_test.go new file mode 100644 index 00000000000..43778ef1034 --- /dev/null +++ b/vendor/github.com/spf13/pflag/float64_slice_test.go @@ -0,0 +1,188 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpF64SFlagSet(f64sp *[]float64) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Float64SliceVar(f64sp, "f64s", []float64{}, "Command separated list!") + return f +} + +func setUpF64SFlagSetWithDefault(f64sp *[]float64) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Float64SliceVar(f64sp, "f64s", []float64{0.0, 1.0}, "Command separated list!") + return f +} + +func TestEmptyF64S(t *testing.T) { + var f64s []float64 + f := setUpF64SFlagSet(&f64s) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getF64S, err := f.GetFloat64Slice("f64s") + if err != nil { + t.Fatal("got an error from GetFloat64Slice():", err) + } + if len(getF64S) != 0 { + t.Fatalf("got f64s %v with len=%d but expected length=0", getF64S, len(getF64S)) + } +} + +func TestF64S(t *testing.T) { + var f64s []float64 + f := setUpF64SFlagSet(&f64s) + + vals := []string{"1.0", "2.0", "4.0", "3.0"} + arg := fmt.Sprintf("--f64s=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f64s { + d, err := strconv.ParseFloat(vals[i], 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected f64s[%d] to be %s but got: %f", i, vals[i], v) + } + } + getF64S, err := f.GetFloat64Slice("f64s") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getF64S { + d, err := strconv.ParseFloat(vals[i], 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected f64s[%d] to be %s but got: %f from GetFloat64Slice", i, vals[i], v) + } + } +} + +func TestF64SDefault(t *testing.T) { + var f64s []float64 + f := setUpF64SFlagSetWithDefault(&f64s) + + vals := []string{"0.0", "1.0"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f64s { + d, err := strconv.ParseFloat(vals[i], 64) + if err != nil { + 
t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected f64s[%d] to be %f but got: %f", i, d, v) + } + } + + getF64S, err := f.GetFloat64Slice("f64s") + if err != nil { + t.Fatal("got an error from GetFloat64Slice():", err) + } + for i, v := range getF64S { + d, err := strconv.ParseFloat(vals[i], 64) + if err != nil { + t.Fatal("got an error from GetFloat64Slice():", err) + } + if d != v { + t.Fatalf("expected f64s[%d] to be %f from GetFloat64Slice but got: %f", i, d, v) + } + } +} + +func TestF64SWithDefault(t *testing.T) { + var f64s []float64 + f := setUpF64SFlagSetWithDefault(&f64s) + + vals := []string{"1.0", "2.0"} + arg := fmt.Sprintf("--f64s=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f64s { + d, err := strconv.ParseFloat(vals[i], 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected f64s[%d] to be %f but got: %f", i, d, v) + } + } + + getF64S, err := f.GetFloat64Slice("f64s") + if err != nil { + t.Fatal("got an error from GetFloat64Slice():", err) + } + for i, v := range getF64S { + d, err := strconv.ParseFloat(vals[i], 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected f64s[%d] to be %f from GetFloat64Slice but got: %f", i, d, v) + } + } +} + +func TestF64SAsSliceValue(t *testing.T) { + var f64s []float64 + f := setUpF64SFlagSet(&f64s) + + in := []string{"1.0", "2.0"} + argfmt := "--f64s=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3.1"}) + } + }) + if len(f64s) != 1 || f64s[0] != 3.1 { + t.Fatalf("Expected ss to be overwritten with '3.1', but got: %v", f64s) + } +} + +func TestF64SCalledTwice(t *testing.T) { + var f64s []float64 + f := setUpF64SFlagSet(&f64s) + + in := []string{"1.0,2.0", "3.0"} + expected := []float64{1.0, 2.0, 3.0} + argfmt := "--f64s=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range f64s { + if expected[i] != v { + t.Fatalf("expected f64s[%d] to be %f but got: %f", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/go.mod b/vendor/github.com/spf13/pflag/go.mod new file mode 100644 index 00000000000..b2287eec134 --- /dev/null +++ b/vendor/github.com/spf13/pflag/go.mod @@ -0,0 +1,3 @@ +module github.com/spf13/pflag + +go 1.12 diff --git a/vendor/github.com/spf13/pflag/go.sum b/vendor/github.com/spf13/pflag/go.sum new file mode 100644 index 00000000000..e69de29bb2d diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go new file mode 100644 index 00000000000..ff128ff06d8 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice.go @@ -0,0 +1,174 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int32Slice Value +type int32SliceValue struct { + value *[]int32 + changed bool +} + +func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue { + isv := new(int32SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int32SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var 
temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return err + } + out[i] = int32(temp64) + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *int32SliceValue) Type() string { + return "int32Slice" +} + +func (s *int32SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int32SliceValue) fromString(val string) (int32, error) { + t64, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(t64), nil +} + +func (s *int32SliceValue) toString(val int32) string { + return fmt.Sprintf("%d", val) +} + +func (s *int32SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int32SliceValue) Replace(val []string) error { + out := make([]int32, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int32SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int32SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int32{}, nil + } + ss := strings.Split(val, ",") + out := make([]int32, len(ss)) + for i, d := range ss { + var err error + var temp64 int64 + temp64, err = strconv.ParseInt(d, 0, 32) + if err != nil { + return nil, err + } + out[i] = int32(temp64) + + } + return out, nil +} + +// GetInt32Slice return the []int32 value of a flag with the given name +func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) { + val, err := f.getFlagType(name, "int32Slice", int32SliceConv) + if err != nil { + return []int32{}, err + } + return val.([]int32), nil +} + +// Int32SliceVar defines a int32Slice flag with specified name, default value, and usage string. +// The argument p points to a []int32 variable in which to store the value of the flag. +func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + f.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32SliceVar defines a int32[] flag with specified name, default value, and usage string. +// The argument p points to a int32[] variable in which to store the value of the flag. +func Int32SliceVar(p *[]int32, name string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage) +} + +// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) { + CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage) +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. 
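+// For example (hypothetical flag name): f.Int32Slice("ports", []int32{80}, "list of ports")
+// parses --ports=8080,8443 into [8080 8443]; since ParseInt is called with base 0,
+// 0x- and 0-prefixed values are accepted as well.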
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + p := []int32{} + f.Int32SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int32Slice defines a []int32 flag with specified name, default value, and usage string. +// The return value is the address of a []int32 variable that stores the value of the flag. +func Int32Slice(name string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, "", value, usage) +} + +// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash. +func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 { + return CommandLine.Int32SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int32_slice_test.go b/vendor/github.com/spf13/pflag/int32_slice_test.go new file mode 100644 index 00000000000..809c5633aa7 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int32_slice_test.go @@ -0,0 +1,194 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpI32SFlagSet(isp *[]int32) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Int32SliceVar(isp, "is", []int32{}, "Command separated list!") + return f +} + +func setUpI32SFlagSetWithDefault(isp *[]int32) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Int32SliceVar(isp, "is", []int32{0, 1}, "Command separated list!") + return f +} + +func TestEmptyI32S(t *testing.T) { + var is []int32 + f := setUpI32SFlagSet(&is) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getI32S, err := f.GetInt32Slice("is") + if err != nil { + t.Fatal("got an error from GetInt32Slice():", err) + } + if len(getI32S) != 0 { + t.Fatalf("got is %v with len=%d but expected length=0", getI32S, len(getI32S)) + } +} + +func TestI32S(t *testing.T) { + var is []int32 + f := setUpI32SFlagSet(&is) + + vals := []string{"1", "2", "4", "3"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d64, err := strconv.ParseInt(vals[i], 0, 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + d := int32(d64) + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) + } + } + getI32S, err := f.GetInt32Slice("is") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getI32S { + d64, err := strconv.ParseInt(vals[i], 0, 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + d := int32(d64) + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d from GetInt32Slice", i, vals[i], v) + } + } +} + +func TestI32SDefault(t *testing.T) { + var is []int32 + f := setUpI32SFlagSetWithDefault(&is) + + vals := []string{"0", "1"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d64, err := strconv.ParseInt(vals[i], 0, 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + d := int32(d64) + if d != v { + t.Fatalf("expected is[%d] 
to be %d but got: %d", i, d, v) + } + } + + getI32S, err := f.GetInt32Slice("is") + if err != nil { + t.Fatal("got an error from GetInt32Slice():", err) + } + for i, v := range getI32S { + d64, err := strconv.ParseInt(vals[i], 0, 32) + if err != nil { + t.Fatal("got an error from GetInt32Slice():", err) + } + d := int32(d64) + if d != v { + t.Fatalf("expected is[%d] to be %d from GetInt32Slice but got: %d", i, d, v) + } + } +} + +func TestI32SWithDefault(t *testing.T) { + var is []int32 + f := setUpI32SFlagSetWithDefault(&is) + + vals := []string{"1", "2"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d64, err := strconv.ParseInt(vals[i], 0, 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + d := int32(d64) + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getI32S, err := f.GetInt32Slice("is") + if err != nil { + t.Fatal("got an error from GetInt32Slice():", err) + } + for i, v := range getI32S { + d64, err := strconv.ParseInt(vals[i], 0, 32) + if err != nil { + t.Fatalf("got error: %v", err) + } + d := int32(d64) + if d != v { + t.Fatalf("expected is[%d] to be %d from GetInt32Slice but got: %d", i, d, v) + } + } +} + +func TestI32SAsSliceValue(t *testing.T) { + var i32s []int32 + f := setUpI32SFlagSet(&i32s) + + in := []string{"1", "2"} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3"}) + } + }) + if len(i32s) != 1 || i32s[0] != 3 { + t.Fatalf("Expected ss to be overwritten with '3.1', but got: %v", i32s) + } +} + +func TestI32SCalledTwice(t *testing.T) { + var is []int32 + f := setUpI32SFlagSet(&is) + + in := []string{"1,2", "3"} + expected := []int32{1, 2, 3} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + if expected[i] != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go new file mode 100644 index 00000000000..25464638f3a --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice.go @@ -0,0 +1,166 @@ +package pflag + +import ( + "fmt" + "strconv" + "strings" +) + +// -- int64Slice Value +type int64SliceValue struct { + value *[]int64 + changed bool +} + +func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue { + isv := new(int64SliceValue) + isv.value = p + *isv.value = val + return isv +} + +func (s *int64SliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) 
+ } + s.changed = true + return nil +} + +func (s *int64SliceValue) Type() string { + return "int64Slice" +} + +func (s *int64SliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%d", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func (s *int64SliceValue) fromString(val string) (int64, error) { + return strconv.ParseInt(val, 0, 64) +} + +func (s *int64SliceValue) toString(val int64) string { + return fmt.Sprintf("%d", val) +} + +func (s *int64SliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *int64SliceValue) Replace(val []string) error { + out := make([]int64, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *int64SliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + +func int64SliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int64{}, nil + } + ss := strings.Split(val, ",") + out := make([]int64, len(ss)) + for i, d := range ss { + var err error + out[i], err = strconv.ParseInt(d, 0, 64) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetInt64Slice return the []int64 value of a flag with the given name +func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) { + val, err := f.getFlagType(name, "int64Slice", int64SliceConv) + if err != nil { + return []int64{}, err + } + return val.([]int64), nil +} + +// Int64SliceVar defines a int64Slice flag with specified name, default value, and usage string. +// The argument p points to a []int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + f.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64SliceVar defines a int64[] flag with specified name, default value, and usage string. +// The argument p points to a int64[] variable in which to store the value of the flag. +func Int64SliceVar(p *[]int64, name string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage) +} + +// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) { + CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage) +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, "", value, usage) + return &p +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. 
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + p := []int64{} + f.Int64SliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// Int64Slice defines a []int64 flag with specified name, default value, and usage string. +// The return value is the address of a []int64 variable that stores the value of the flag. +func Int64Slice(name string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, "", value, usage) +} + +// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash. +func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 { + return CommandLine.Int64SliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/int64_slice_test.go b/vendor/github.com/spf13/pflag/int64_slice_test.go new file mode 100644 index 00000000000..09805c7678f --- /dev/null +++ b/vendor/github.com/spf13/pflag/int64_slice_test.go @@ -0,0 +1,188 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + "fmt" + "strconv" + "strings" + "testing" +) + +func setUpI64SFlagSet(isp *[]int64) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Int64SliceVar(isp, "is", []int64{}, "Command separated list!") + return f +} + +func setUpI64SFlagSetWithDefault(isp *[]int64) *FlagSet { + f := NewFlagSet("test", ContinueOnError) + f.Int64SliceVar(isp, "is", []int64{0, 1}, "Command separated list!") + return f +} + +func TestEmptyI64S(t *testing.T) { + var is []int64 + f := setUpI64SFlagSet(&is) + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + getI64S, err := f.GetInt64Slice("is") + if err != nil { + t.Fatal("got an error from GetInt64Slice():", err) + } + if len(getI64S) != 0 { + t.Fatalf("got is %v with len=%d but expected length=0", getI64S, len(getI64S)) + } +} + +func TestI64S(t *testing.T) { + var is []int64 + f := setUpI64SFlagSet(&is) + + vals := []string{"1", "2", "4", "3"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.ParseInt(vals[i], 0, 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) + } + } + getI64S, err := f.GetInt64Slice("is") + if err != nil { + t.Fatalf("got error: %v", err) + } + for i, v := range getI64S { + d, err := strconv.ParseInt(vals[i], 0, 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %s but got: %d from GetInt64Slice", i, vals[i], v) + } + } +} + +func TestI64SDefault(t *testing.T) { + var is []int64 + f := setUpI64SFlagSetWithDefault(&is) + + vals := []string{"0", "1"} + + err := f.Parse([]string{}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.ParseInt(vals[i], 0, 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getI64S, err := f.GetInt64Slice("is") + if err != nil { + t.Fatal("got an error from GetInt64Slice():", err) + } + for i, v := range getI64S { + d, err := strconv.ParseInt(vals[i], 0, 64) + if err != nil { + t.Fatal("got an error from GetInt64Slice():", err) + } + if d != v { + 
t.Fatalf("expected is[%d] to be %d from GetInt64Slice but got: %d", i, d, v) + } + } +} + +func TestI64SWithDefault(t *testing.T) { + var is []int64 + f := setUpI64SFlagSetWithDefault(&is) + + vals := []string{"1", "2"} + arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) + err := f.Parse([]string{arg}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + d, err := strconv.ParseInt(vals[i], 0, 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, d, v) + } + } + + getI64S, err := f.GetInt64Slice("is") + if err != nil { + t.Fatal("got an error from GetInt64Slice():", err) + } + for i, v := range getI64S { + d, err := strconv.ParseInt(vals[i], 0, 64) + if err != nil { + t.Fatalf("got error: %v", err) + } + if d != v { + t.Fatalf("expected is[%d] to be %d from GetInt64Slice but got: %d", i, d, v) + } + } +} + +func TestI64SAsSliceValue(t *testing.T) { + var i64s []int64 + f := setUpI64SFlagSet(&i64s) + + in := []string{"1", "2"} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3"}) + } + }) + if len(i64s) != 1 || i64s[0] != 3 { + t.Fatalf("Expected ss to be overwritten with '3.1', but got: %v", i64s) + } +} + +func TestI64SCalledTwice(t *testing.T) { + var is []int64 + f := setUpI64SFlagSet(&is) + + in := []string{"1,2", "3"} + expected := []int64{1, 2, 3} + argfmt := "--is=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + for i, v := range is { + if expected[i] != v { + t.Fatalf("expected is[%d] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go index 1e7c9edde95..e71c39d91aa 100644 --- a/vendor/github.com/spf13/pflag/int_slice.go +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -51,6 +51,36 @@ func (s *intSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *intSliceValue) Append(val string) error { + i, err := strconv.Atoi(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *intSliceValue) Replace(val []string) error { + out := make([]int, len(val)) + for i, d := range val { + var err error + out[i], err = strconv.Atoi(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *intSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = strconv.Itoa(d) + } + return out +} + func intSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go index 7dd196fe3fb..775faae4fd8 100644 --- a/vendor/github.com/spf13/pflag/ip_slice.go +++ b/vendor/github.com/spf13/pflag/ip_slice.go @@ -72,9 +72,47 @@ func (s *ipSliceValue) String() string { return "[" + out + "]" } +func (s *ipSliceValue) fromString(val string) (net.IP, error) { + return net.ParseIP(strings.TrimSpace(val)), nil +} + +func (s *ipSliceValue) toString(val net.IP) string { + return val.String() +} + +func (s 
*ipSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *ipSliceValue) Replace(val []string) error { + out := make([]net.IP, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *ipSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func ipSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") - // Emtpy string would cause a slice with one (empty) entry + // Empty string would cause a slice with one (empty) entry if len(val) == 0 { return []net.IP{}, nil } diff --git a/vendor/github.com/spf13/pflag/ip_slice_test.go b/vendor/github.com/spf13/pflag/ip_slice_test.go index b0c681c5b2a..d1892768630 100644 --- a/vendor/github.com/spf13/pflag/ip_slice_test.go +++ b/vendor/github.com/spf13/pflag/ip_slice_test.go @@ -141,6 +141,29 @@ func TestIPSCalledTwice(t *testing.T) { } } +func TestIPSAsSliceValue(t *testing.T) { + var ips []net.IP + f := setUpIPSFlagSet(&ips) + + in := []string{"192.168.1.1", "0:0:0:0:0:0:0:1"} + argfmt := "--ips=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"192.168.1.2"}) + } + }) + if len(ips) != 1 || !ips[0].Equal(net.ParseIP("192.168.1.2")) { + t.Fatalf("Expected ss to be overwritten with '192.168.1.2', but got: %v", ips) + } +} + func TestIPSBadQuoting(t *testing.T) { tests := []struct { diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index fa7bc60187a..4894af81802 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -23,6 +23,32 @@ func (s *stringArrayValue) Set(val string) error { return nil } +func (s *stringArrayValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringArrayValue) Replace(val []string) error { + out := make([]string, len(val)) + for i, d := range val { + var err error + out[i] = d + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *stringArrayValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = d + } + return out +} + func (s *stringArrayValue) Type() string { return "stringArray" } diff --git a/vendor/github.com/spf13/pflag/string_array_test.go b/vendor/github.com/spf13/pflag/string_array_test.go index 1ceac8c6c69..3c6d595873d 100644 --- a/vendor/github.com/spf13/pflag/string_array_test.go +++ b/vendor/github.com/spf13/pflag/string_array_test.go @@ -193,6 +193,29 @@ func TestSAWithSpecialChar(t *testing.T) { } } +func TestSAAsSliceValue(t *testing.T) { + var sa []string + f := setUpSAFlagSet(&sa) + + in := []string{"1ns", "2ns"} + argfmt := "--sa=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3ns"}) + } + }) + if len(sa) != 1 || sa[0] != "3ns" { + t.Fatalf("Expected ss to be overwritten with '3ns', 
but got: %v", sa) + } +} + func TestSAWithSquareBrackets(t *testing.T) { var sa []string f := setUpSAFlagSet(&sa) diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 0cd3ccc083e..3cb2e69dba0 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -62,6 +62,20 @@ func (s *stringSliceValue) String() string { return "[" + str + "]" } +func (s *stringSliceValue) Append(val string) error { + *s.value = append(*s.value, val) + return nil +} + +func (s *stringSliceValue) Replace(val []string) error { + *s.value = val + return nil +} + +func (s *stringSliceValue) GetSlice() []string { + return *s.value +} + func stringSliceConv(sval string) (interface{}, error) { sval = sval[1 : len(sval)-1] // An empty string would cause a slice with one (empty) string @@ -84,7 +98,7 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -100,7 +114,7 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // The argument p points to a []string variable in which to store the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { @@ -116,7 +130,7 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. // For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { @@ -136,7 +150,7 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // The return value is the address of a []string variable that stores the value of the flag. // Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
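+// (A StringArray flag, by contrast, treats each occurrence as exactly one
+// element and never splits on commas.)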
// For example: -// --ss="v1,v2" -ss="v3" +// --ss="v1,v2" --ss="v3" // will result in // []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { diff --git a/vendor/github.com/spf13/pflag/string_slice_test.go b/vendor/github.com/spf13/pflag/string_slice_test.go index c41f3bd660d..96924617291 100644 --- a/vendor/github.com/spf13/pflag/string_slice_test.go +++ b/vendor/github.com/spf13/pflag/string_slice_test.go @@ -251,3 +251,26 @@ func TestSSWithSquareBrackets(t *testing.T) { } } } + +func TestSSAsSliceValue(t *testing.T) { + var ss []string + f := setUpSSFlagSet(&ss) + + in := []string{"one", "two"} + argfmt := "--ss=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"three"}) + } + }) + if len(ss) != 1 || ss[0] != "three" { + t.Fatalf("Expected ss to be overwritten with 'three', but got: %s", ss) + } +} diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go new file mode 100644 index 00000000000..a807a04a0ba --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_to_int64.go @@ -0,0 +1,149 @@ +package pflag + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// -- stringToInt64 Value +type stringToInt64Value struct { + value *map[string]int64 + changed bool +} + +func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value { + ssv := new(stringToInt64Value) + ssv.value = p + *ssv.value = val + return ssv +} + +// Format: a=1,b=2 +func (s *stringToInt64Value) Set(val string) error { + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return err + } + } + if !s.changed { + *s.value = out + } else { + for k, v := range out { + (*s.value)[k] = v + } + } + s.changed = true + return nil +} + +func (s *stringToInt64Value) Type() string { + return "stringToInt64" +} + +func (s *stringToInt64Value) String() string { + var buf bytes.Buffer + i := 0 + for k, v := range *s.value { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(strconv.FormatInt(v, 10)) + i++ + } + return "[" + buf.String() + "]" +} + +func stringToInt64Conv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // An empty string would cause an empty map + if len(val) == 0 { + return map[string]int64{}, nil + } + ss := strings.Split(val, ",") + out := make(map[string]int64, len(ss)) + for _, pair := range ss { + kv := strings.SplitN(pair, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("%s must be formatted as key=value", pair) + } + var err error + out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64) + if err != nil { + return nil, err + } + } + return out, nil +} + +// GetStringToInt64 return the map[string]int64 value of a flag with the given name +func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) { + val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv) + if err != nil { + return map[string]int64{}, err + } + return val.(map[string]int64), nil +} + +// StringToInt64Var defines a string flag with specified 
name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the multiple flags.
+// The value of the flag is a comma-separated list of key=value pairs, e.g. a=1,b=2.
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+	f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+	f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// The value of the flag is a comma-separated list of key=value pairs, e.g. a=1,b=2.
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+	CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+	CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of the flag is a comma-separated list of key=value pairs, e.g. a=1,b=2.
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+	p := map[string]int64{}
+	f.StringToInt64VarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+	p := map[string]int64{}
+	f.StringToInt64VarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// The value of the flag is a comma-separated list of key=value pairs, e.g. a=1,b=2.
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+	return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+	return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_to_int64_test.go b/vendor/github.com/spf13/pflag/string_to_int64_test.go
new file mode 100644
index 00000000000..2b3f29895b5
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64_test.go
@@ -0,0 +1,156 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
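+
+// A minimal usage sketch for the flag type exercised below (the flag name
+// "limits" and its values are hypothetical; only the StringToInt64 helpers
+// added in this patch are assumed):
+//
+//	limits := StringToInt64("limits", map[string]int64{}, "per-key limits")
+//	_ = CommandLine.Parse([]string{"--limits=a=1,b=2", "--limits=b=3"})
+//	// *limits == map[string]int64{"a": 1, "b": 3}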
+
+package pflag
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"testing"
+)
+
+func setUpS2I64FlagSet(s2ip *map[string]int64) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.StringToInt64Var(s2ip, "s2i", map[string]int64{}, "Comma separated list!")
+	return f
+}
+
+func setUpS2I64FlagSetWithDefault(s2ip *map[string]int64) *FlagSet {
+	f := NewFlagSet("test", ContinueOnError)
+	f.StringToInt64Var(s2ip, "s2i", map[string]int64{"a": 1, "b": 2}, "Comma separated list!")
+	return f
+}
+
+func createS2I64Flag(vals map[string]int64) string {
+	var buf bytes.Buffer
+	i := 0
+	for k, v := range vals {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteString(k)
+		buf.WriteRune('=')
+		buf.WriteString(strconv.FormatInt(v, 10))
+		i++
+	}
+	return buf.String()
+}
+
+func TestEmptyS2I64(t *testing.T) {
+	var s2i map[string]int64
+	f := setUpS2I64FlagSet(&s2i)
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+
+	getS2I, err := f.GetStringToInt64("s2i")
+	if err != nil {
+		t.Fatal("got an error from GetStringToInt64():", err)
+	}
+	if len(getS2I) != 0 {
+		t.Fatalf("got s2i %v with len=%d but expected length=0", getS2I, len(getS2I))
+	}
+}
+
+func TestS2I64(t *testing.T) {
+	var s2i map[string]int64
+	f := setUpS2I64FlagSet(&s2i)
+
+	vals := map[string]int64{"a": 1, "b": 2, "d": 4, "c": 3}
+	arg := fmt.Sprintf("--s2i=%s", createS2I64Flag(vals))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for k, v := range s2i {
+		if vals[k] != v {
+			t.Fatalf("expected s2i[%s] to be %d but got: %d", k, vals[k], v)
+		}
+	}
+	getS2I, err := f.GetStringToInt64("s2i")
+	if err != nil {
+		t.Fatalf("got error: %v", err)
+	}
+	for k, v := range getS2I {
+		if vals[k] != v {
+			t.Fatalf("expected s2i[%s] to be %d but got: %d from GetStringToInt64", k, vals[k], v)
+		}
+	}
+}
+
+func TestS2I64Default(t *testing.T) {
+	var s2i map[string]int64
+	f := setUpS2I64FlagSetWithDefault(&s2i)
+
+	vals := map[string]int64{"a": 1, "b": 2}
+
+	err := f.Parse([]string{})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for k, v := range s2i {
+		if vals[k] != v {
+			t.Fatalf("expected s2i[%s] to be %d but got: %d", k, vals[k], v)
+		}
+	}
+
+	getS2I, err := f.GetStringToInt64("s2i")
+	if err != nil {
+		t.Fatal("got an error from GetStringToInt64():", err)
+	}
+	for k, v := range getS2I {
+		if vals[k] != v {
+			t.Fatalf("expected s2i[%s] to be %d from GetStringToInt64 but got: %d", k, vals[k], v)
+		}
+	}
+}
+
+func TestS2I64WithDefault(t *testing.T) {
+	var s2i map[string]int64
+	f := setUpS2I64FlagSetWithDefault(&s2i)
+
+	vals := map[string]int64{"a": 1, "b": 2}
+	arg := fmt.Sprintf("--s2i=%s", createS2I64Flag(vals))
+	err := f.Parse([]string{arg})
+	if err != nil {
+		t.Fatal("expected no error; got", err)
+	}
+	for k, v := range s2i {
+		if vals[k] != v {
+			t.Fatalf("expected s2i[%s] to be %d but got: %d", k, vals[k], v)
+		}
+	}
+
+	getS2I, err := f.GetStringToInt64("s2i")
+	if err != nil {
+		t.Fatal("got an error from GetStringToInt64():", err)
+	}
+	for k, v := range getS2I {
+		if vals[k] != v {
+			t.Fatalf("expected s2i[%s] to be %d from GetStringToInt64 but got: %d", k, vals[k], v)
+		}
+	}
+}
+
+func TestS2I64CalledTwice(t *testing.T) {
+	var s2i map[string]int64
+	f := setUpS2I64FlagSet(&s2i)
+
+	in := []string{"a=1,b=2", "b=3"}
+	expected := map[string]int64{"a": 1, "b": 3}
+	argfmt := "--s2i=%s"
+	arg1 := fmt.Sprintf(argfmt, in[0])
+	arg2 := fmt.Sprintf(argfmt, in[1])
+	err := f.Parse([]string{arg1, arg2})
+	if err != nil {
+ t.Fatal("expected no error; got", err) + } + for i, v := range s2i { + if expected[i] != v { + t.Fatalf("expected s2i[%s] to be %d but got: %d", i, expected[i], v) + } + } +} diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go index edd94c600af..5fa924835ed 100644 --- a/vendor/github.com/spf13/pflag/uint_slice.go +++ b/vendor/github.com/spf13/pflag/uint_slice.go @@ -50,6 +50,48 @@ func (s *uintSliceValue) String() string { return "[" + strings.Join(out, ",") + "]" } +func (s *uintSliceValue) fromString(val string) (uint, error) { + t, err := strconv.ParseUint(val, 10, 0) + if err != nil { + return 0, err + } + return uint(t), nil +} + +func (s *uintSliceValue) toString(val uint) string { + return fmt.Sprintf("%d", val) +} + +func (s *uintSliceValue) Append(val string) error { + i, err := s.fromString(val) + if err != nil { + return err + } + *s.value = append(*s.value, i) + return nil +} + +func (s *uintSliceValue) Replace(val []string) error { + out := make([]uint, len(val)) + for i, d := range val { + var err error + out[i], err = s.fromString(d) + if err != nil { + return err + } + } + *s.value = out + return nil +} + +func (s *uintSliceValue) GetSlice() []string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = s.toString(d) + } + return out +} + func uintSliceConv(val string) (interface{}, error) { val = strings.Trim(val, "[]") // Empty string would cause a slice with one (empty) entry diff --git a/vendor/github.com/spf13/pflag/uint_slice_test.go b/vendor/github.com/spf13/pflag/uint_slice_test.go index db1a19dc2d6..d0da4d0757b 100644 --- a/vendor/github.com/spf13/pflag/uint_slice_test.go +++ b/vendor/github.com/spf13/pflag/uint_slice_test.go @@ -140,6 +140,29 @@ func TestUISWithDefault(t *testing.T) { } } +func TestUISAsSliceValue(t *testing.T) { + var uis []uint + f := setUpUISFlagSet(&uis) + + in := []string{"1", "2"} + argfmt := "--uis=%s" + arg1 := fmt.Sprintf(argfmt, in[0]) + arg2 := fmt.Sprintf(argfmt, in[1]) + err := f.Parse([]string{arg1, arg2}) + if err != nil { + t.Fatal("expected no error; got", err) + } + + f.VisitAll(func(f *Flag) { + if val, ok := f.Value.(SliceValue); ok { + _ = val.Replace([]string{"3"}) + } + }) + if len(uis) != 1 || uis[0] != 3 { + t.Fatalf("Expected ss to be overwritten with '3.1', but got: %v", uis) + } +} + func TestUISCalledTwice(t *testing.T) { var uis []uint f := setUpUISFlagSet(&uis) diff --git a/vendor/golang.org/x/net/bpf/vm_bpf_test.go b/vendor/golang.org/x/net/bpf/vm_bpf_test.go index 77fa8fe4ab0..137eea160d6 100644 --- a/vendor/golang.org/x/net/bpf/vm_bpf_test.go +++ b/vendor/golang.org/x/net/bpf/vm_bpf_test.go @@ -12,6 +12,8 @@ import ( "golang.org/x/net/bpf" "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" + "golang.org/x/net/nettest" ) // A virtualMachine is a BPF virtual machine which can process an @@ -137,7 +139,7 @@ type osVirtualMachine struct { // testOSVM creates a virtualMachine which uses the OS's BPF VM by injecting // packets into a UDP listener with a BPF program attached to it. 
func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { - l, err := net.ListenPacket("udp4", "127.0.0.1:0") + l, err := nettest.NewLocalPacketListener("udp") if err != nil { t.Fatalf("failed to open OS VM UDP listener: %v", err) } @@ -147,12 +149,17 @@ func testOSVM(t *testing.T, filter []bpf.Instruction) (virtualMachine, func()) { t.Fatalf("failed to compile BPF program: %v", err) } - p := ipv4.NewPacketConn(l) - if err = p.SetBPF(prog); err != nil { + ip := l.LocalAddr().(*net.UDPAddr).IP + if ip.To4() != nil && ip.To16() == nil { + err = ipv4.NewPacketConn(l).SetBPF(prog) + } else { + err = ipv6.NewPacketConn(l).SetBPF(prog) + } + if err != nil { t.Fatalf("failed to attach BPF program to listener: %v", err) } - s, err := net.Dial("udp4", l.LocalAddr().String()) + s, err := net.Dial(l.LocalAddr().Network(), l.LocalAddr().String()) if err != nil { t.Fatalf("failed to dial connection to listener: %v", err) } diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go index f0d2e55bdc6..cf8947c3327 100644 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -129,7 +129,8 @@ func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { offset := int(ins.Off) - if !inBounds(len(in), offset, 0) { + // Size of LoadMemShift is always 1 byte + if !inBounds(len(in), offset, 1) { return 0, false } diff --git a/vendor/golang.org/x/net/bpf/vm_load_test.go b/vendor/golang.org/x/net/bpf/vm_load_test.go index 04578b66b4d..d57e4af1ed7 100644 --- a/vendor/golang.org/x/net/bpf/vm_load_test.go +++ b/vendor/golang.org/x/net/bpf/vm_load_test.go @@ -13,55 +13,61 @@ import ( ) func TestVMLoadAbsoluteOffsetOutOfBounds(t *testing.T) { + pkt := []byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, 1, 2, 3, + } + vm, done, err := testVM(t, []bpf.Instruction{ bpf.LoadAbsolute{ - Off: 100, - Size: 2, + Off: uint32(len(pkt)), + Size: 1, }, - bpf.RetA{}, + // Out of bounds should return 0, return 1 to tell if execution continued + bpf.RetConstant{Val: 1}, }) if err != nil { t.Fatalf("failed to load BPF program: %v", err) } defer done() - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, 1, 2, 3, - }) + out, err := vm.Run(pkt) if err != nil { t.Fatalf("unexpected error while running program: %v", err) } if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + t.Fatalf("unexpected result:\n- want: %d\n- got: %d", want, got) } } func TestVMLoadAbsoluteOffsetPlusSizeOutOfBounds(t *testing.T) { + pkt := []byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + } + vm, done, err := testVM(t, []bpf.Instruction{ bpf.LoadAbsolute{ - Off: 8, + Off: uint32(len(pkt) - 1), Size: 2, }, - bpf.RetA{}, + // Out of bounds should return 0, return 1 to tell if execution continued + bpf.RetConstant{Val: 1}, }) if err != nil { t.Fatalf("failed to load BPF program: %v", err) } defer done() - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) + out, err := vm.Run(pkt) if err != nil { t.Fatalf("unexpected error while running program: %v", err) } if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + t.Fatalf("unexpected result:\n- want: %d\n- got: %d", want, got) } } @@ -107,54 +113,60 @@ func TestVMLoadConstantOK(t *testing.T) { } func 
TestVMLoadIndirectOutOfBounds(t *testing.T) { + pkt := []byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + } + vm, done, err := testVM(t, []bpf.Instruction{ bpf.LoadIndirect{ - Off: 100, + Off: uint32(len(pkt)), Size: 1, }, - bpf.RetA{}, + // Out of bounds should return 0, return 1 to tell if execution continued + bpf.RetConstant{Val: 1}, }) if err != nil { t.Fatalf("failed to load BPF program: %v", err) } defer done() - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) + out, err := vm.Run(pkt) if err != nil { t.Fatalf("unexpected error while running program: %v", err) } if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + t.Fatalf("unexpected result:\n- want: %d\n- got: %d", want, got) } } func TestVMLoadMemShiftOutOfBounds(t *testing.T) { + pkt := []byte{ + 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + 0, + } + vm, done, err := testVM(t, []bpf.Instruction{ bpf.LoadMemShift{ - Off: 100, + Off: uint32(len(pkt)), }, - bpf.RetA{}, + // Out of bounds should return 0, return 1 to tell if execution continued + bpf.RetConstant{Val: 1}, }) if err != nil { t.Fatalf("failed to load BPF program: %v", err) } defer done() - out, err := vm.Run([]byte{ - 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, - 0, - }) + out, err := vm.Run(pkt) if err != nil { t.Fatalf("unexpected error while running program: %v", err) } if want, got := 0, out; want != got { - t.Fatalf("unexpected number of output bytes:\n- want: %d\n- got: %d", + t.Fatalf("unexpected result:\n- want: %d\n- got: %d", want, got) } } diff --git a/vendor/golang.org/x/net/go.mod b/vendor/golang.org/x/net/go.mod new file mode 100644 index 00000000000..325937b4a8e --- /dev/null +++ b/vendor/golang.org/x/net/go.mod @@ -0,0 +1,9 @@ +module golang.org/x/net + +go 1.11 + +require ( + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a + golang.org/x/text v0.3.0 +) diff --git a/vendor/golang.org/x/net/go.sum b/vendor/golang.org/x/net/go.sum new file mode 100644 index 00000000000..0fa675a4b87 --- /dev/null +++ b/vendor/golang.org/x/net/go.sum @@ -0,0 +1,6 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 488e8d3cd63..992cff2a33a 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -439,9 +439,6 @@ func (p *parser) resetInsertionMode() { case a.Select: if !last { for ancestor, first := n, p.oe[0]; ancestor != first; { - if ancestor == first { - break - } ancestor = p.oe[p.oe.index(ancestor)-1] switch ancestor.DataAtom { case a.Template: @@ -633,7 +630,16 @@ func inHeadIM(p *parser) bool { p.oe.pop() p.acknowledgeSelfClosingTag() return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + case a.Noscript: + p.addElement() + if p.scripting { + p.setOriginalIM() + p.im = textIM + } else { + p.im = 
inHeadNoscriptIM + } + return true + case a.Script, a.Title, a.Noframes, a.Style: p.addElement() p.setOriginalIM() p.im = textIM @@ -695,6 +701,49 @@ func inHeadIM(p *parser) bool { return false } +// 12.2.6.4.5. +func inHeadNoscriptIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Style: + return inHeadIM(p) + case a.Head, a.Noscript: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Noscript, a.Br: + default: + // Ignore the token. + return true + } + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) == 0 { + // It was all whitespace. + return inHeadIM(p) + } + case CommentToken: + return inHeadIM(p) + } + p.oe.pop() + if p.top().DataAtom != a.Head { + panic("html: the new current node will be a head element.") + } + p.im = inHeadIM + if p.tok.DataAtom == a.Noscript { + return true + } + return false +} + // Section 12.2.6.4.6. func afterHeadIM(p *parser) bool { switch p.tok.Type { @@ -904,7 +953,7 @@ func inBodyIM(p *parser) bool { case a.A: for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- { if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A { - p.inBodyEndTagFormatting(a.A) + p.inBodyEndTagFormatting(a.A, "a") p.oe.remove(n) p.afe.remove(n) break @@ -918,7 +967,7 @@ func inBodyIM(p *parser) bool { case a.Nobr: p.reconstructActiveFormattingElements() if p.elementInScope(defaultScope, a.Nobr) { - p.inBodyEndTagFormatting(a.Nobr) + p.inBodyEndTagFormatting(a.Nobr, "nobr") p.reconstructActiveFormattingElements() } p.addFormattingElement() @@ -1126,7 +1175,7 @@ func inBodyIM(p *parser) bool { case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6) case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U: - p.inBodyEndTagFormatting(p.tok.DataAtom) + p.inBodyEndTagFormatting(p.tok.DataAtom, p.tok.Data) case a.Applet, a.Marquee, a.Object: if p.popUntil(defaultScope, p.tok.DataAtom) { p.clearActiveFormattingElements() @@ -1137,7 +1186,7 @@ func inBodyIM(p *parser) bool { case a.Template: return inHeadIM(p) default: - p.inBodyEndTagOther(p.tok.DataAtom) + p.inBodyEndTagOther(p.tok.DataAtom, p.tok.Data) } case CommentToken: p.addChild(&Node{ @@ -1164,7 +1213,7 @@ func inBodyIM(p *parser) bool { return true } -func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { +func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom, tagName string) { // This is the "adoption agency" algorithm, described at // https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency @@ -1186,7 +1235,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { } } if formattingElement == nil { - p.inBodyEndTagOther(tagAtom) + p.inBodyEndTagOther(tagAtom, tagName) return } feIndex := p.oe.index(formattingElement) @@ -1291,9 +1340,17 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) { // inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM. 
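+// tagName is the raw tag name, consulted only when tagAtom is zero, i.e. for
+// uncommon (custom) tags that have no atom assigned.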
// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content // https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign -func (p *parser) inBodyEndTagOther(tagAtom a.Atom) { +func (p *parser) inBodyEndTagOther(tagAtom a.Atom, tagName string) { for i := len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == tagAtom { + // Two element nodes have the same tag if they have the same Data (a + // string-typed field). As an optimization, for common HTML tags, each + // Data string is assigned a unique, non-zero DataAtom (a uint32-typed + // field), since integer comparison is faster than string comparison. + // Uncommon (custom) tags get a zero DataAtom. + // + // The if condition here is equivalent to (p.oe[i].Data == tagName). + if (p.oe[i].DataAtom == tagAtom) && + ((tagAtom != 0) || (p.oe[i].Data == tagName)) { p.oe = p.oe[:i] break } @@ -1687,8 +1744,9 @@ func inCellIM(p *parser) bool { return true } // Close the cell and reprocess. - p.popUntil(tableScope, a.Td, a.Th) - p.clearActiveFormattingElements() + if p.popUntil(tableScope, a.Td, a.Th) { + p.clearActiveFormattingElements() + } p.im = inRowIM return false } @@ -2242,6 +2300,33 @@ func (p *parser) parse() error { // // The input is assumed to be UTF-8 encoded. func Parse(r io.Reader) (*Node, error) { + return ParseWithOptions(r) +} + +// ParseFragment parses a fragment of HTML and returns the nodes that were +// found. If the fragment is the InnerHTML for an existing element, pass that +// element in context. +// +// It has the same intricacies as Parse. +func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { + return ParseFragmentWithOptions(r, context) +} + +// ParseOption configures a parser. +type ParseOption func(p *parser) + +// ParseOptionEnableScripting configures the scripting flag. +// https://html.spec.whatwg.org/multipage/webappapis.html#enabling-and-disabling-scripting +// +// By default, scripting is enabled. +func ParseOptionEnableScripting(enable bool) ParseOption { + return func(p *parser) { + p.scripting = enable + } +} + +// ParseWithOptions is like Parse, with options. +func ParseWithOptions(r io.Reader, opts ...ParseOption) (*Node, error) { p := &parser{ tokenizer: NewTokenizer(r), doc: &Node{ @@ -2251,6 +2336,11 @@ func Parse(r io.Reader) (*Node, error) { framesetOK: true, im: initialIM, } + + for _, f := range opts { + f(p) + } + err := p.parse() if err != nil { return nil, err @@ -2258,12 +2348,8 @@ func Parse(r io.Reader) (*Node, error) { return p.doc, nil } -// ParseFragment parses a fragment of HTML and returns the nodes that were -// found. If the fragment is the InnerHTML for an existing element, pass that -// element in context. -// -// It has the same intricacies as Parse. -func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { +// ParseFragmentWithOptions is like ParseFragment, with options. 
+func ParseFragmentWithOptions(r io.Reader, context *Node, opts ...ParseOption) ([]*Node, error) { contextTag := "" if context != nil { if context.Type != ElementNode { @@ -2287,6 +2373,10 @@ func ParseFragment(r io.Reader, context *Node) ([]*Node, error) { context: context, } + for _, f := range opts { + f(p) + } + root := &Node{ Type: ElementNode, DataAtom: a.Html, diff --git a/vendor/golang.org/x/net/html/parse_test.go b/vendor/golang.org/x/net/html/parse_test.go index 9bba918c517..b16d69ac9f4 100644 --- a/vendor/golang.org/x/net/html/parse_test.go +++ b/vendor/golang.org/x/net/html/parse_test.go @@ -238,11 +238,30 @@ func TestParser(t *testing.T) { } } +// Issue 16318 +func TestParserWithoutScripting(t *testing.T) { + text := `

` + want := `| +| +|