diff --git a/go.mod b/go.mod index 0a168a91d..e6719192a 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 github.com/operator-framework/operator-sdk v0.10.1-0.20190919225052-3a85983ecc72 github.com/pkg/errors v0.8.1 + github.com/prometheus/client_golang v1.1.0 // indirect github.com/spf13/pflag v1.0.3 gomodules.xyz/stow v0.2.0 k8s.io/api v0.0.0-20190612125737-db0771252981 diff --git a/go.sum b/go.sum index 477d6d51f..3305252d4 100644 --- a/go.sum +++ b/go.sum @@ -79,6 +79,8 @@ github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NR github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -136,8 +138,11 @@ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyG github.com/docker/distribution v0.0.0-20170726174610-edc3ab29cdff/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.6.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.0.0-20180612054059-a9fbbdc8dd87 h1:a9PI9K38c+lqsMzO5itpsaXd9BhUYWTC9GM7TN5Vn0U= github.com/docker/docker v0.0.0-20180612054059-a9fbbdc8dd87/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -251,6 +256,8 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -290,6 +297,7 @@ github.com/gopherjs/gopherjs 
v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORR github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -302,6 +310,7 @@ github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpg github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.3/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -362,6 +371,8 @@ github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBv github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsonnet-bundler/jsonnet-bundler v0.1.0/go.mod h1:YKsSFc9VFhhLITkJS3X2PrRqWG9u2Jq99udTdDjQLfM= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -377,6 +388,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -429,6 +441,7 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -459,7 +472,9 @@ github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII= github.com/opentracing-contrib/go-stdlib v0.0.0-20170113013457-1de4cc2120e7/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w= @@ -505,6 +520,9 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3 h1:9iH4JKXLzFbOAdtqv/a+j8aewx2Y8lAjAydhbaScPF8= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= @@ -516,6 +534,9 @@ github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0 h1:7etb9YClo3a6HjLzfl6rIQaU+FDfi0VSX39io3aQ+DM= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -523,6 +544,9 @@ github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084 h1:sofwID9zm4tzrgykg80hfFph1mryUeLRsUfoocVVmRY= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64/go.mod h1:oYrT4Vs22/NcnoVYXt5m4cIHP+znvgyusahVpyETKTw= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYLG25tN4g= @@ -551,6 +575,7 @@ github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJ github.com/shurcooL/vfsgen v0.0.0-20180711163814-62bca832be04/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= @@ -655,6 +680,8 @@ golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -695,6 +722,8 @@ golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438 h1:khxRGsvPk4n2y8I/mLLjp7e5dMTJmH75wvqS6nMwUtY= golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= +golang.org/x/sys 
v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -786,6 +815,7 @@ gopkg.in/yaml.v2 v2.1.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/pkg/apis/redis/v1alpha1/distributedrediscluster_types.go b/pkg/apis/redis/v1alpha1/distributedrediscluster_types.go
index 408192097..5391245c3 100644
--- a/pkg/apis/redis/v1alpha1/distributedrediscluster_types.go
+++ b/pkg/apis/redis/v1alpha1/distributedrediscluster_types.go
@@ -104,10 +104,12 @@ type DistributedRedisClusterStatus struct {
 	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
 	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
 	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
-	Status         ClusterStatus      `json:"status"`
-	Reason         string             `json:"reason,omitempty"`
-	NumberOfMaster int32              `json:"numberOfMaster,omitempty"`
-	Nodes          []RedisClusterNode `json:"nodes"`
+	Status               ClusterStatus      `json:"status"`
+	Reason               string             `json:"reason,omitempty"`
+	NumberOfMaster       int32              `json:"numberOfMaster,omitempty"`
+	MinReplicationFactor int32              `json:"minReplicationFactor,omitempty"`
+	MaxReplicationFactor int32              `json:"maxReplicationFactor,omitempty"`
+	Nodes                []RedisClusterNode `json:"nodes"`
 	// The number of restore which reached phase Succeeded.
 	// +optional
 	RestoreSucceeded int32 `json:"restoreSucceeded,omitempty"`
diff --git a/pkg/controller/distributedrediscluster/distributedrediscluster_controller.go b/pkg/controller/distributedrediscluster/distributedrediscluster_controller.go
index 14b024f3a..46f77fd2d 100644
--- a/pkg/controller/distributedrediscluster/distributedrediscluster_controller.go
+++ b/pkg/controller/distributedrediscluster/distributedrediscluster_controller.go
@@ -17,6 +17,7 @@ import (
 	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
 	"github.com/ucloud/redis-cluster-operator/pkg/config"
+	"github.com/ucloud/redis-cluster-operator/pkg/controller/heal"
 	clustermanger "github.com/ucloud/redis-cluster-operator/pkg/controller/manager"
 	"github.com/ucloud/redis-cluster-operator/pkg/k8sutil"
 	"github.com/ucloud/redis-cluster-operator/pkg/redisutil"
@@ -118,23 +119,24 @@ func (r *ReconcileDistributedRedisCluster) Reconcile(request reconcile.Request)
 	err := r.client.Get(context.TODO(), request.NamespacedName, instance)
 	if err != nil {
 		if errors.IsNotFound(err) {
-			// Request object not found, could have been deleted after reconcile request.
-			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
-			// Return and don't requeue
 			return reconcile.Result{}, nil
 		}
-		// Error reading the object - requeue the request.
 		return reconcile.Result{}, err
 	}

-	err = r.waitPodReady(instance)
+	ctx := &syncContext{
+		cluster:   instance,
+		reqLogger: reqLogger,
+	}
+
+	err = r.ensureCluster(ctx)
 	if err != nil {
 		switch GetType(err) {
 		case StopRetry:
 			reqLogger.Info("invalid", "err", err)
 			return reconcile.Result{}, nil
 		}
-		reqLogger.WithValues("err", err).Info("requeue")
+		reqLogger.WithValues("err", err).Info("ensureCluster")
 		new := instance.Status.DeepCopy()
 		SetClusterScaling(new, err.Error())
 		r.updateClusterIfNeed(instance, new)
@@ -145,12 +147,34 @@ func (r *ReconcileDistributedRedisCluster) Reconcile(request reconcile.Request)
 	if err != nil {
 		return reconcile.Result{}, Kubernetes.Wrap(err, "GetStatefulSetPods")
 	}
+
+	ctx.pods = clusterPods(redisClusterPods.Items)
+	reqLogger.V(6).Info("debug cluster pods", "", ctx.pods)
+	ctx.healer = clustermanger.NewHealer(&heal.CheckAndHeal{
+		Logger:     reqLogger,
+		PodControl: k8sutil.NewPodController(r.client),
+		Pods:       ctx.pods,
+		DryRun:     false,
+	})
+	err = r.waitPodReady(ctx)
+	if err != nil {
+		switch GetType(err) {
+		case Kubernetes:
+			return reconcile.Result{}, err
+		}
+		reqLogger.WithValues("err", err).Info("waitPodReady")
+		new := instance.Status.DeepCopy()
+		SetClusterScaling(new, err.Error())
+		r.updateClusterIfNeed(instance, new)
+		return reconcile.Result{RequeueAfter: requeueAfter}, nil
+	}
+
 	password, err := getClusterPassword(r.client, instance)
 	if err != nil {
 		return reconcile.Result{}, Kubernetes.Wrap(err, "getClusterPassword")
 	}

-	admin, err := newRedisAdmin(redisClusterPods.Items, password, config.RedisConf())
+	admin, err := newRedisAdmin(ctx.pods, password, config.RedisConf())
 	if err != nil {
 		return reconcile.Result{}, Redis.Wrap(err, "newRedisAdmin")
 	}
@@ -163,7 +187,17 @@
 		}
 	}

-	err = r.waitForClusterJoin(instance, clusterInfos, admin)
+	requeue, err := ctx.healer.Heal(instance, clusterInfos, admin)
+	if err != nil {
+		return reconcile.Result{}, Redis.Wrap(err, "Heal")
+	}
+	if requeue {
+		return reconcile.Result{RequeueAfter: requeueAfter}, nil
+	}
+
+	ctx.admin = admin
+	ctx.clusterInfos = clusterInfos
+	err = r.waitForClusterJoin(ctx)
 	if err != nil {
 		switch GetType(err) {
 		case Requeue:
@@ -193,13 +227,18 @@ func (r *ReconcileDistributedRedisCluster) Reconcile(request reconcile.Request)
 	reqLogger.V(4).Info("buildClusterStatus", "status", status)
 	r.updateClusterIfNeed(instance, status)

-	err = r.ensureCluster(instance, clusterInfos, admin)
-	if err != nil {
-		new := instance.Status.DeepCopy()
-		SetClusterFailed(new, err.Error())
-		r.updateClusterIfNeed(instance, new)
-		return reconcile.Result{}, err
+	instance.Status = *status
+	if needClusterOperation(instance, reqLogger) {
+		reqLogger.Info(">>>>>> clustering")
+		err = r.sync(ctx)
+		if err != nil {
+			new := instance.Status.DeepCopy()
+			SetClusterFailed(new, err.Error())
+			r.updateClusterIfNeed(instance, new)
+			return reconcile.Result{}, err
+		}
 	}
+
 	newClusterInfos, err := admin.GetClusterInfos()
 	if err != nil {
 		if clusterInfos.Status == redisutil.ClusterInfosPartial {
@@ -209,6 +248,5 @@ func (r *ReconcileDistributedRedisCluster) Reconcile(request reconcile.Request)
 	newStatus := buildClusterStatus(newClusterInfos, redisClusterPods.Items, &instance.Status)
 	SetClusterOK(newStatus, "OK")
 	r.updateClusterIfNeed(instance, newStatus)
-	//return reconcile.Result{RequeueAfter: requeueEnsure}, nil
-	return reconcile.Result{}, nil
+	return reconcile.Result{RequeueAfter: requeueEnsure}, nil
 }
diff --git a/pkg/controller/distributedrediscluster/helper.go b/pkg/controller/distributedrediscluster/helper.go
index 22fd9dacc..a99831eee 100644
--- a/pkg/controller/distributedrediscluster/helper.go
+++ b/pkg/controller/distributedrediscluster/helper.go
@@ -6,6 +6,7 @@ import (
 	"net"
 	"time"

+	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -47,19 +48,20 @@ func getClusterPassword(client client.Client, cluster *redisv1alpha1.Distributed
 }

 // newRedisAdmin builds and returns new redis.Admin from the list of pods
-func newRedisAdmin(pods []corev1.Pod, password string, cfg *config.Redis) (redisutil.IAdmin, error) {
+func newRedisAdmin(pods []*corev1.Pod, password string, cfg *config.Redis) (redisutil.IAdmin, error) {
 	nodesAddrs := []string{}
 	for _, pod := range pods {
 		redisPort := redisutil.DefaultRedisPort
 		for _, container := range pod.Spec.Containers {
-			if container.Name == "redis-node" {
+			if container.Name == "redis" {
 				for _, port := range container.Ports {
-					if port.Name == "redis" {
+					if port.Name == "client" {
 						redisPort = fmt.Sprintf("%d", port.ContainerPort)
 					}
 				}
 			}
 		}
+		log.V(4).Info("append redis admin addr", "addr", pod.Status.PodIP, "port", redisPort)
 		nodesAddrs = append(nodesAddrs, net.JoinHostPort(pod.Status.PodIP, redisPort))
 	}
 	adminConfig := redisutil.AdminOptions{
@@ -143,3 +145,31 @@ func newRedisCluster(infos *redisutil.ClusterInfos, cluster *redisv1alpha1.Distr

 	return rCluster, nodes, nil
 }
+
+func clusterPods(pods []corev1.Pod) []*corev1.Pod {
+	var podSlice []*corev1.Pod
+	for _, pod := range pods {
+		podPointer := pod
+		podSlice = append(podSlice, &podPointer)
+	}
+	return podSlice
+}
+
+func needClusterOperation(cluster *redisv1alpha1.DistributedRedisCluster, reqLogger logr.Logger) bool {
+	if compareIntValue("NumberOfMaster", &cluster.Status.NumberOfMaster, &cluster.Spec.MasterSize) {
+		reqLogger.V(4).Info("needClusterOperation---NumberOfMaster")
+		return true
+	}
+
+	if compareIntValue("MinReplicationFactor", &cluster.Status.MinReplicationFactor, &cluster.Spec.ClusterReplicas) {
+		reqLogger.V(4).Info("needClusterOperation---MinReplicationFactor")
+		return true
+	}
+
+	if compareIntValue("MaxReplicationFactor", &cluster.Status.MaxReplicationFactor, &cluster.Spec.ClusterReplicas) {
+		reqLogger.V(4).Info("needClusterOperation---MaxReplicationFactor")
+		return true
+	}
+
+	return false
+}
diff --git a/pkg/controller/distributedrediscluster/init_from_backup.go b/pkg/controller/distributedrediscluster/init_from_backup.go
deleted file mode 100644
index 30358db70..000000000
--- a/pkg/controller/distributedrediscluster/init_from_backup.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package distributedrediscluster
-
-import (
-	"github.com/go-logr/logr"
-
-	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
-)
-
-func (r *ReconcileDistributedRedisCluster) initFromBackup(reqLogger logr.Logger, cluster *redisv1alpha1.DistributedRedisCluster) error {
-	return nil
-}
diff --git a/pkg/controller/distributedrediscluster/status.go b/pkg/controller/distributedrediscluster/status.go
index 449cecde7..b677bc083 100644
--- a/pkg/controller/distributedrediscluster/status.go
+++ b/pkg/controller/distributedrediscluster/status.go
@@ -2,6 +2,7 @@ package distributedrediscluster

 import (
 	"fmt"
+	"math"
 	"reflect"

 	corev1 "k8s.io/api/core/v1"
@@ -82,6 +83,22 @@ func buildClusterStatus(clusterInfos *redisutil.ClusterInfos, pods []corev1.Pod,
 	}
 	status.NumberOfMaster = nbMaster
+	minReplicationFactor := math.MaxInt32
+	maxReplicationFactor := 0
+	for _, counter := range nbSlaveByMaster {
+		if counter > maxReplicationFactor {
+			maxReplicationFactor = counter
+		}
+		if counter < minReplicationFactor {
+			minReplicationFactor = counter
+		}
+	}
+	if len(nbSlaveByMaster) == 0 {
+		minReplicationFactor = 0
+	}
+	status.MaxReplicationFactor = int32(maxReplicationFactor)
+	status.MinReplicationFactor = int32(minReplicationFactor)
+
 	return status
 }
diff --git a/pkg/controller/distributedrediscluster/sync_handler.go b/pkg/controller/distributedrediscluster/sync_handler.go
index 34e226c50..532013cba 100644
--- a/pkg/controller/distributedrediscluster/sync_handler.go
+++ b/pkg/controller/distributedrediscluster/sync_handler.go
@@ -2,11 +2,14 @@ package distributedrediscluster

 import (
 	"fmt"
-	"net"
 	"time"

+	"github.com/go-logr/logr"
+	corev1 "k8s.io/api/core/v1"
+
 	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
 	"github.com/ucloud/redis-cluster-operator/pkg/controller/clustering"
+	"github.com/ucloud/redis-cluster-operator/pkg/controller/manager"
 	"github.com/ucloud/redis-cluster-operator/pkg/k8sutil"
 	"github.com/ucloud/redis-cluster-operator/pkg/redisutil"
 )
@@ -16,14 +19,23 @@ const (
 	requeueEnsure = 60 * time.Second
 )

-func (r *ReconcileDistributedRedisCluster) waitPodReady(cluster *redisv1alpha1.DistributedRedisCluster) error {
+type syncContext struct {
+	cluster      *redisv1alpha1.DistributedRedisCluster
+	clusterInfos *redisutil.ClusterInfos
+	admin        redisutil.IAdmin
+	healer       manager.IHeal
+	pods         []*corev1.Pod
+	reqLogger    logr.Logger
+}
+
+func (r *ReconcileDistributedRedisCluster) ensureCluster(ctx *syncContext) error {
+	cluster := ctx.cluster
 	if err := r.validate(cluster); err != nil {
 		if k8sutil.IsRequestRetryable(err) {
 			return Kubernetes.Wrap(err, "Validate")
 		}
 		return StopRetry.Wrap(err, "stop retry")
 	}
-	// step 1. apply statefulSet for cluster
 	labels := getLabels(cluster)
 	var backup *redisv1alpha1.RedisClusterBackup
 	var err error
@@ -48,9 +60,14 @@
 		}
 		return StopRetry.Wrap(err, "stop retry")
 	}
+	return nil
+}

-	// step 2. wait for all redis node ready
-	if err := r.checker.CheckRedisNodeNum(cluster); err != nil {
+func (r *ReconcileDistributedRedisCluster) waitPodReady(ctx *syncContext) error {
+	if _, err := ctx.healer.FixTerminatingPods(ctx.cluster, 5*time.Minute); err != nil {
+		return Kubernetes.Wrap(err, "FixTerminatingPods")
+	}
+	if err := r.checker.CheckRedisNodeNum(ctx.cluster); err != nil {
 		return Requeue.Wrap(err, "CheckRedisNodeNum")
 	}
@@ -84,23 +101,23 @@ func (r *ReconcileDistributedRedisCluster) validate(cluster *redisv1alpha1.Distr
 	return nil
 }

-func (r *ReconcileDistributedRedisCluster) waitForClusterJoin(cluster *redisv1alpha1.DistributedRedisCluster, clusterInfos *redisutil.ClusterInfos, admin redisutil.IAdmin) error {
-	logger := log.WithValues("namespace", cluster.Namespace, "name", cluster.Name)
+func (r *ReconcileDistributedRedisCluster) waitForClusterJoin(ctx *syncContext) error {
 	//logger.Info(">>> Assign a different config epoch to each node")
 	//err := admin.SetConfigEpoch()
 	//if err != nil {
 	//	return Redis.Wrap(err, "SetConfigEpoch")
 	//}

-	if _, err := admin.GetClusterInfos(); err == nil {
+	if infos, err := ctx.admin.GetClusterInfos(); err == nil {
+		ctx.reqLogger.V(6).Info("debug waitForClusterJoin", "cluster infos", infos)
 		return nil
 	}
 	var firstNode *redisutil.Node
-	for _, nodeInfo := range clusterInfos.Infos {
+	for _, nodeInfo := range ctx.clusterInfos.Infos {
 		firstNode = nodeInfo.Node
 		break
 	}
-	logger.Info(">>> Sending CLUSTER MEET messages to join the cluster")
-	err := admin.AttachNodeToCluster(firstNode.IPPort())
+	ctx.reqLogger.Info(">>> Sending CLUSTER MEET messages to join the cluster")
+	err := ctx.admin.AttachNodeToCluster(firstNode.IPPort())
 	if err != nil {
 		return Redis.Wrap(err, "AttachNodeToCluster")
 	}
@@ -108,13 +125,15 @@ func (r *ReconcileDistributedRedisCluster) waitForClusterJoin(cluster *redisv1al
 	// waiting for cluster join will find all the nodes agree about
 	// the config as they are still empty with unassigned slots.
 	time.Sleep(1 * time.Second)
-	_, err = admin.GetClusterInfos()
+
+	_, err = ctx.admin.GetClusterInfos()
 	if err != nil {
 		return Requeue.Wrap(err, "wait for cluster join")
 	}
 	return nil
 }

+/*
 func (r *ReconcileDistributedRedisCluster) sync(cluster *redisv1alpha1.DistributedRedisCluster, clusterInfos *redisutil.ClusterInfos, admin redisutil.IAdmin) error {
 	logger := log.WithValues("namespace", cluster.Namespace, "name", cluster.Name)
 	// step 3. check if the cluster is empty, if it is empty, init the cluster
@@ -177,8 +196,9 @@
 	}

 	return nil
-}
+}*/

+/*
 func (r *ReconcileDistributedRedisCluster) syncCluster(cluster *redisv1alpha1.DistributedRedisCluster, clusterInfos *redisutil.ClusterInfos, admin redisutil.IAdmin) error {
 	logger := log.WithValues("namespace", cluster.Namespace, "name", cluster.Name)
 	cNbMaster := cluster.Spec.MasterSize
@@ -254,10 +274,12 @@
 	}

 	return nil
-}
+}*/

-func (r *ReconcileDistributedRedisCluster) ensureCluster(cluster *redisv1alpha1.DistributedRedisCluster, clusterInfos *redisutil.ClusterInfos, admin redisutil.IAdmin) error {
-	logger := log.WithValues("namespace", cluster.Namespace, "name", cluster.Name)
+func (r *ReconcileDistributedRedisCluster) sync(ctx *syncContext) error {
+	cluster := ctx.cluster
+	admin := ctx.admin
+	clusterInfos := ctx.clusterInfos
 	if err := admin.SetConfigIfNeed(cluster.Spec.Config); err != nil {
 		return Redis.Wrap(err, "SetConfigIfNeed")
 	}
@@ -280,7 +302,7 @@
 	if err != nil {
 		return Cluster.Wrap(err, "DispatchMasters")
 	}
-	logger.V(4).Info("DispatchMasters Info", "newMasters", newMasters, "curMasters", curMasters, "allMaster", allMaster)
+	ctx.reqLogger.V(4).Info("DispatchMasters Info", "newMasters", newMasters, "curMasters", curMasters, "allMaster", allMaster)

 	// Second select Node that is already a slave
 	currentSlaveNodes := nodes.FilterByFunc(redisutil.IsSlave)
@@ -301,7 +323,7 @@
 	})

 	if 0 == len(curMasters) {
-		logger.Info("Creating cluster")
+		ctx.reqLogger.Info("Creating cluster")
 		newRedisSlavesByMaster, bestEffort := clustering.PlaceSlaves(rCluster, newMasters, currentSlaveNodes, newSlave, cReplicaFactor)
 		if bestEffort {
 			rCluster.NodesPlacement = redisv1alpha1.NodesPlacementInfoBestEffort
@@ -315,7 +337,7 @@
 			return Cluster.Wrap(err, "AllocSlots")
 		}
 	} else if len(newMasters) > len(curMasters) {
-		logger.Info("Scaling cluster")
+		ctx.reqLogger.Info("Scaling cluster")
 		newRedisSlavesByMaster, bestEffort := clustering.PlaceSlaves(rCluster, newMasters, currentSlaveNodes, newSlave, cReplicaFactor)
 		if bestEffort {
 			rCluster.NodesPlacement = redisv1alpha1.NodesPlacementInfoBestEffort
@@ -328,7 +350,7 @@
 		if err := clustering.RebalancedCluster(admin, newMasters); err != nil {
 			return Cluster.Wrap(err, "RebalancedCluster")
 		}
-	} else if len(newMasters) == len(curMasters) {
+	} else if cluster.Status.MinReplicationFactor < cluster.Spec.ClusterReplicas {
 		newRedisSlavesByMaster, bestEffort := clustering.PlaceSlaves(rCluster, newMasters, currentSlaveNodes, newSlave, cReplicaFactor)
 		if bestEffort {
 			rCluster.NodesPlacement = redisv1alpha1.NodesPlacementInfoBestEffort
diff --git a/pkg/controller/heal/clustersplit.go b/pkg/controller/heal/clustersplit.go
new file mode 100644
index 000000000..55891cc5b
--- /dev/null
+++ b/pkg/controller/heal/clustersplit.go
@@ -0,0 +1,129 @@
+package heal
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/errors"
+
+	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
+	"github.com/ucloud/redis-cluster-operator/pkg/config"
+	"github.com/ucloud/redis-cluster-operator/pkg/redisutil"
+)
+
+// FixClusterSplit is used to detect and fix a cluster split
+func (c *CheckAndHeal) FixClusterSplit(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin, config *config.Redis) (bool, error) {
+	clusters := buildClustersLists(infos)
+
+	if len(clusters) > 1 {
+		if c.DryRun {
+			return true, nil
+		}
+		return true, c.reassignClusters(admin, config, clusters)
+	}
+	c.Logger.V(3).Info("[Check] No split cluster detected")
+	return false, nil
+}
+
+type cluster []string
+
+func (c *CheckAndHeal) reassignClusters(admin redisutil.IAdmin, config *config.Redis, clusters []cluster) error {
+	c.Logger.Info("[Check] Cluster split detected, the Redis manager will recover from the issue, but data may be lost")
+	var errs []error
+	// only one cluster may remain
+	mainCluster, badClusters := splitMainCluster(clusters)
+	if len(mainCluster) == 0 {
+		c.Logger.Error(nil, "[Check] Impossible to fix cluster split, cannot elect main cluster")
+		return fmt.Errorf("impossible to fix cluster split, cannot elect main cluster")
+	}
+	c.Logger.Info("[Check] Cluster is elected as main cluster", "Cluster", mainCluster)
+	// reset admin to connect to the correct cluster
+	admin.Connections().ReplaceAll(mainCluster)
+
+	// reconfigure bad clusters
+	for _, cluster := range badClusters {
+		c.Logger.Info(fmt.Sprintf("[Check] All keys stored in redis cluster '%s' will be lost", cluster))
+		clusterAdmin := redisutil.NewAdmin(cluster,
+			&redisutil.AdminOptions{
+				ConnectionTimeout:  time.Duration(config.DialTimeout) * time.Millisecond,
+				RenameCommandsFile: config.GetRenameCommandsFile(),
+			})
+		for _, nodeAddr := range cluster {
+			if err := clusterAdmin.FlushAndReset(nodeAddr, redisutil.ResetHard); err != nil {
+				c.Logger.Error(err, "unable to flush the node", "node", nodeAddr)
+				errs = append(errs, err)
+			}
+			if err := admin.AttachNodeToCluster(nodeAddr); err != nil {
+				c.Logger.Error(err, "unable to attach the node", "node", nodeAddr)
+				errs = append(errs, err)
+			}
+
+		}
+		clusterAdmin.Close()
+	}
+
+	return errors.NewAggregate(errs)
+}
+
+func splitMainCluster(clusters []cluster) (cluster, []cluster) {
+	if len(clusters) == 0 {
+		return cluster{}, []cluster{}
+	}
+	// only the biggest cluster is kept, or the first one if several clusters have the same size
+	maincluster := -1
+	maxSize := 0
+	for i, c := range clusters {
+		if len(c) > maxSize {
+			maxSize = len(c)
+			maincluster = i
+		}
+	}
+	if maincluster != -1 {
+		main := clusters[maincluster]
+		return main, append(clusters[:maincluster], clusters[maincluster+1:]...)
+	}
+	return clusters[0], []cluster{}
+}
+
+// buildClustersLists builds a list of independent clusters
+// we could have clusters partially overlapping in case of an inconsistent cluster view
+func buildClustersLists(infos *redisutil.ClusterInfos) []cluster {
+	clusters := []cluster{}
+	for _, nodeinfos := range infos.Infos {
+		if nodeinfos == nil || nodeinfos.Node == nil {
+			continue
+		}
+		slice := append(nodeinfos.Friends, nodeinfos.Node)
+		var c cluster
+		// build list of addresses
+		for _, node := range slice {
+			if len(node.FailStatus) == 0 {
+				c = append(c, node.IPPort())
+			}
+		}
+		// check if this cluster overlaps with another
+		overlap := false
+		for _, node := range c {
+			if findInCluster(node, clusters) {
+				overlap = true
+				break
+			}
+		}
+		// if this is a new cluster, add it
+		if !overlap {
+			clusters = append(clusters, c)
+		}
+	}
+	return clusters
+}
+
+func findInCluster(addr string, clusters []cluster) bool {
+	for _, c := range clusters {
+		for _, nodeAddr := range c {
+			if addr == nodeAddr {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/pkg/controller/heal/clustersplit_test.go b/pkg/controller/heal/clustersplit_test.go
new file mode 100644
index 000000000..69499188c
--- /dev/null
+++ b/pkg/controller/heal/clustersplit_test.go
@@ -0,0 +1,114 @@
+package heal
+
+import (
+	"testing"
+
+	"github.com/ucloud/redis-cluster-operator/pkg/redisutil"
+)
+
+func Test_buildClustersLists(t *testing.T) {
+	// In the test below, we cannot directly initialize the redisutil.Nodes slices inside the redisutil.NodeInfos literals; this is a go vet issue: https://github.com/golang/go/issues/9171
+	ip1 := redisutil.Nodes{{IP: "ip1", Port: "1234"}}
+	ip2 := redisutil.Nodes{{IP: "ip2", Port: "1234"}}
+	ip56 := redisutil.Nodes{{IP: "ip5", Port: "1234"}, {IP: "ip6", Port: "1234"}}
+	ip64 := redisutil.Nodes{{IP: "ip6", Port: "1234"}, {IP: "ip4", Port: "1234"}}
+	ip54 := redisutil.Nodes{{IP: "ip5", Port: "1234"}, {IP: "ip4", Port: "1234"}}
+	// end of workaround
+	testCases := []struct {
+		input  *redisutil.ClusterInfos
+		output []cluster
+	}{ // several partially different clusters cannot happen, so not tested
+		{ // empty
+			input:  &redisutil.ClusterInfos{Infos: map[string]*redisutil.NodeInfos{}, Status: redisutil.ClusterInfosConsistent},
+			output: []cluster{},
+		},
+		{ // one node
+			input:  &redisutil.ClusterInfos{Infos: map[string]*redisutil.NodeInfos{"ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: redisutil.Nodes{}}}, Status: redisutil.ClusterInfosConsistent},
+			output: []cluster{{"ip1:1234"}},
+		},
+		{ // no discrepancy
+			input: &redisutil.ClusterInfos{
+				Infos: map[string]*redisutil.NodeInfos{
+					"ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: ip2},
+					"ip2:1234": {Node: &redisutil.Node{IP: "ip2", Port: "1234"}, Friends: ip1},
+				},
+				Status: redisutil.ClusterInfosConsistent,
+			},
+			output: []cluster{{"ip1:1234", "ip2:1234"}},
+		},
+		{ // several decorrelated
+			input: &redisutil.ClusterInfos{
+				Infos: map[string]*redisutil.NodeInfos{
+					"ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: ip2},
+					"ip2:1234": {Node: &redisutil.Node{IP: "ip2", Port: "1234"}, Friends: ip1},
+					"ip3:1234": {Node: &redisutil.Node{IP: "ip3", Port: "1234"}, Friends: redisutil.Nodes{}},
+					"ip4:1234": {Node: &redisutil.Node{IP: "ip4", Port: "1234"}, Friends: ip56},
+					"ip5:1234": {Node: &redisutil.Node{IP: "ip5", Port: "1234"}, Friends: ip64},
+					"ip6:1234": {Node: &redisutil.Node{IP: "ip6", Port: "1234"}, Friends: ip54},
+				},
+				Status: redisutil.ClusterInfosInconsistent,
+			},
[]cluster{{"ip1:1234", "ip2:1234"}, {"ip3:1234"}, {"ip4:1234", "ip5:1234", "ip6:1234"}}, + }, + { // empty ignored + input: &redisutil.ClusterInfos{ + Infos: map[string]*redisutil.NodeInfos{ + "ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: ip2}, + "ip2:1234": {Node: &redisutil.Node{IP: "ip2", Port: "1234"}, Friends: ip1}, + "ip3:1234": nil, + }, + Status: redisutil.ClusterInfosInconsistent, + }, + output: []cluster{{"ip1:1234", "ip2:1234"}}, + }, + } + + for i, tc := range testCases { + output := buildClustersLists(tc.input) + // because we work with map, order might not be conserved + if !compareClusters(output, tc.output) { + t.Errorf("[Case %d] Unexpected result for buildClustersLists, expected %v, got %v", i, tc.output, output) + } + } +} + +func compareClusters(c1, c2 []cluster) bool { + if len(c1) != len(c2) { + return false + } + + for _, c1elem := range c2 { + found := false + for _, c2elem := range c1 { + if compareCluster(c1elem, c2elem) { + found = true + break + } + } + if !found { + return false + } + } + + return true +} + +func compareCluster(c1, c2 cluster) bool { + if len(c1) != len(c2) { + return false + } + for _, c1elem := range c2 { + found := false + for _, c2elem := range c1 { + if c1elem == c2elem { + found = true + break + } + } + if !found { + return false + } + } + + return true +} diff --git a/pkg/controller/heal/failednodes.go b/pkg/controller/heal/failednodes.go new file mode 100644 index 000000000..c0e70ebf6 --- /dev/null +++ b/pkg/controller/heal/failednodes.go @@ -0,0 +1,56 @@ +package heal + +import ( + "k8s.io/apimachinery/pkg/util/errors" + + redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" + "github.com/ucloud/redis-cluster-operator/pkg/redisutil" +) + +// FixFailedNodes fix failed nodes: in some cases (cluster without enough master after crash or scale down), some nodes may still know about fail nodes +func (c *CheckAndHeal) FixFailedNodes(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin) (bool, error) { + forgetSet := listGhostNodes(cluster, infos) + var errs []error + doneAnAction := false + for id := range forgetSet { + doneAnAction = true + c.Logger.Info("[FixFailedNodes] Forgetting failed node, this command might fail, this is not an error", "node", id) + if !c.DryRun { + c.Logger.Info("[FixFailedNodes] try to forget node", "nodeId", id) + if err := admin.ForgetNode(id); err != nil { + errs = append(errs, err) + } + } + } + + return doneAnAction, errors.NewAggregate(errs) +} + +// listGhostNodes : A Ghost node is a node still known by some redis node but which doesn't exists anymore +// meaning it is failed, and pod not in kubernetes, or without targetable IP +func listGhostNodes(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos) map[string]bool { + ghostNodesSet := map[string]bool{} + if infos == nil || infos.Infos == nil { + return ghostNodesSet + } + for _, nodeinfos := range infos.Infos { + for _, node := range nodeinfos.Friends { + // only forget it when no more part of kubernetes, or if noaddress + if node.HasStatus(redisutil.NodeStatusNoAddr) { + ghostNodesSet[node.ID] = true + } + if node.HasStatus(redisutil.NodeStatusFail) || node.HasStatus(redisutil.NodeStatusPFail) { + found := false + for _, pod := range cluster.Status.Nodes { + if pod.ID == node.ID { + found = true + } + } + if !found { + ghostNodesSet[node.ID] = true + } + } + } + } + return ghostNodesSet +} diff --git a/pkg/controller/heal/heal.go 
new file mode 100644
index 000000000..4d8dcf2bf
--- /dev/null
+++ b/pkg/controller/heal/heal.go
@@ -0,0 +1,15 @@
+package heal
+
+import (
+	"github.com/go-logr/logr"
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/ucloud/redis-cluster-operator/pkg/k8sutil"
+)
+
+type CheckAndHeal struct {
+	Logger     logr.Logger
+	PodControl k8sutil.IPodControl
+	Pods       []*corev1.Pod
+	DryRun     bool
+}
diff --git a/pkg/controller/heal/terminatingpod.go b/pkg/controller/heal/terminatingpod.go
new file mode 100644
index 000000000..39674ff6f
--- /dev/null
+++ b/pkg/controller/heal/terminatingpod.go
@@ -0,0 +1,42 @@
+package heal
+
+import (
+	"time"
+
+	"k8s.io/apimachinery/pkg/util/errors"
+
+	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
+)
+
+// FixTerminatingPods forces the deletion of pods that have been stuck in
+// terminating status for longer than maxDuration.
+func (c *CheckAndHeal) FixTerminatingPods(cluster *redisv1alpha1.DistributedRedisCluster, maxDuration time.Duration) (bool, error) {
+	var errs []error
+	var actionDone bool
+
+	if maxDuration == time.Duration(0) {
+		return actionDone, nil
+	}
+
+	now := time.Now()
+	for _, pod := range c.Pods {
+		if pod.DeletionTimestamp == nil {
+			// ignore pods without a deletion timestamp
+			continue
+		}
+		maxTime := pod.DeletionTimestamp.Add(maxDuration) // add maxDuration as a grace period
+		if maxTime.Before(now) {
+			c.Logger.Info("[FixTerminatingPods] found deletion pod", "podName", pod.Name)
+			actionDone = true
+			// this pod should already have been deleted a while ago
+			if !c.DryRun {
+				c.Logger.Info("[FixTerminatingPods] try to delete pod", "podName", pod.Name)
+				if err := c.PodControl.DeletePodByName(cluster.Namespace, pod.Name); err != nil {
+					errs = append(errs, err)
+				}
+			}
+		}
+	}
+
+	return actionDone, errors.NewAggregate(errs)
+}
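As a reading aid, and not part of the patch itself: roughly how the controller changes above wire these heal helpers together, condensed into a sketch. The names client, reqLogger, pods, and cluster stand for values already in scope in Reconcile; error wrapping is elided.

// Illustrative sketch only, condensed from the Reconcile changes above.
healer := &heal.CheckAndHeal{
	Logger:     reqLogger,
	PodControl: k8sutil.NewPodController(client), // pod create/update/delete helper
	Pods:       pods,                             // []*corev1.Pod built by clusterPods(...)
	DryRun:     false,                            // when true, only report what would be done
}
// Delete pods stuck in Terminating for more than five minutes, then let the
// requeue re-check the expected node count via CheckRedisNodeNum.
if _, err := healer.FixTerminatingPods(cluster, 5*time.Minute); err != nil {
	return err // the caller wraps this as a Kubernetes error
}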
diff --git a/pkg/controller/heal/untrustenodes.go b/pkg/controller/heal/untrustenodes.go
new file mode 100644
index 000000000..3d6f45026
--- /dev/null
+++ b/pkg/controller/heal/untrustenodes.go
@@ -0,0 +1,91 @@
+package heal
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/errors"
+
+	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
+	"github.com/ucloud/redis-cluster-operator/pkg/redisutil"
+)
+
+// FixUntrustedNodes is used to remove nodes that are not trusted by other nodes. This can happen when a node
+// is removed from the cluster (with the "forget nodes" command) but tries to rejoin the cluster.
+func (c *CheckAndHeal) FixUntrustedNodes(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin) (bool, error) {
+	untrustedNode := listUntrustedNodes(infos)
+	var errs []error
+	doneAnAction := false
+
+	for id, uNode := range untrustedNode {
+		c.Logger.Info("[FixUntrustedNodes] found untrusted node", "node", uNode)
+		getByIPFunc := func(n *redisutil.Node) bool {
+			if n.IP == uNode.IP && n.ID != uNode.ID {
+				return true
+			}
+			return false
+		}
+		node2, err := infos.GetNodes().GetNodesByFunc(getByIPFunc)
+		if err != nil && !redisutil.IsNodeNotFoundedError(err) {
+			c.Logger.Error(err, "error with GetNodesByFunc(getByIPFunc) search function")
+			errs = append(errs, err)
+			continue
+		}
+		if len(node2) > 0 {
+			// the pod is used by another Redis node ID, so we should not delete it
+			continue
+		}
+		exist, reused := checkIfPodNameExistAndIsReused(uNode, c.Pods)
+		if exist && !reused {
+			c.Logger.Info("[FixUntrustedNodes] try to delete pod", "podName", uNode.PodName)
+			if err := c.PodControl.DeletePodByName(cluster.Namespace, uNode.PodName); err != nil {
+				errs = append(errs, err)
+			}
+		}
+		doneAnAction = true
+		if !c.DryRun {
+			c.Logger.Info("[FixUntrustedNodes] try to forget node", "nodeId", id)
+			if err := admin.ForgetNode(id); err != nil {
+				errs = append(errs, err)
+			}
+		}
+	}
+
+	return doneAnAction, errors.NewAggregate(errs)
+}
+
+func listUntrustedNodes(infos *redisutil.ClusterInfos) map[string]*redisutil.Node {
+	untrustedNodes := make(map[string]*redisutil.Node)
+	if infos == nil || infos.Infos == nil {
+		return untrustedNodes
+	}
+	for _, nodeinfos := range infos.Infos {
+		for _, node := range nodeinfos.Friends {
+			if node.HasStatus(redisutil.NodeStatusHandshake) {
+				if _, found := untrustedNodes[node.ID]; !found {
+					untrustedNodes[node.ID] = node
+				}
+			}
+		}
+	}
+	return untrustedNodes
+}
+
+func checkIfPodNameExistAndIsReused(node *redisutil.Node, podlist []*corev1.Pod) (exist bool, reused bool) {
+	if node.PodName == "" {
+		return
+	}
+	for _, currentPod := range podlist {
+		if currentPod.Name == node.PodName {
+			exist = true
+			if currentPod.Status.PodIP == node.IP {
+				// this check is used to see whether the Pod name is reused by another Redis node:
+				// we check that the Pod name from the Redis node is not used by another Redis node,
+				// by comparing the IP of the current Pod with the one recorded in the cluster view.
+				// if the Pod IP and name from the redis info match the IP and name from GetPod, it
+				// means that the Pod is still in use and the Redis node is not a ghost
+				reused = true
+				break
+			}
+		}
+	}
+	return
+}
diff --git a/pkg/controller/redisclusterbackup/helper.go b/pkg/controller/redisclusterbackup/helper.go
index c9e805669..dd87802b4 100644
--- a/pkg/controller/redisclusterbackup/helper.go
+++ b/pkg/controller/redisclusterbackup/helper.go
@@ -5,6 +5,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/rest"
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
@@ -91,3 +92,11 @@ func redisPassword(cluster *redisv1alpha1.DistributedRedisCluster) corev1.EnvVar
 		},
 	}
 }
+
+func newDirectClient(config *rest.Config) client.Client {
+	c, err := client.New(config, client.Options{})
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
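A note on why newDirectClient exists (background, not in the patch itself): the manager's client is the split client described in the controller below, so reads are served from the informer cache and can lag behind the API server; a client built directly from the rest.Config bypasses that cache, which the backup controller wants for Job reads. A minimal sketch of the distinction:

// mgr.GetClient() returns the cache-backed split client: reads may be stale.
cached := mgr.GetClient()

// client.New talks straight to the API server on every read, so Job status
// is observed without informer-cache lag (this is what newDirectClient does).
direct, err := client.New(mgr.GetConfig(), client.Options{})
if err != nil {
	panic(err)
}
_, _ = cached, direct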
diff --git a/pkg/controller/redisclusterbackup/redisclusterbackup_controller.go b/pkg/controller/redisclusterbackup/redisclusterbackup_controller.go
index 29d5ed6e3..b7377a483 100644
--- a/pkg/controller/redisclusterbackup/redisclusterbackup_controller.go
+++ b/pkg/controller/redisclusterbackup/redisclusterbackup_controller.go
@@ -42,7 +42,8 @@ func Add(mgr manager.Manager) error {
 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
 	r := &ReconcileRedisClusterBackup{client: mgr.GetClient(), scheme: mgr.GetScheme()}
 	r.crController = k8sutil.NewCRControl(r.client)
-	r.jobController = k8sutil.NewJobController(r.client)
+	r.directClient = newDirectClient(mgr.GetConfig())
+	r.jobController = k8sutil.NewJobController(r.directClient)
 	r.recorder = mgr.GetEventRecorderFor("redis-cluster-operator-backup")
 	return r
 }
@@ -109,9 +110,10 @@ var _ reconcile.Reconciler = &ReconcileRedisClusterBackup{}
 type ReconcileRedisClusterBackup struct {
 	// This client, initialized using mgr.Client() above, is a split client
 	// that reads objects from the cache and writes to the apiserver
-	client   client.Client
-	scheme   *runtime.Scheme
-	recorder record.EventRecorder
+	client       client.Client
+	directClient client.Client
+	scheme       *runtime.Scheme
+	recorder     record.EventRecorder

 	crController  k8sutil.ICustomResource
 	jobController k8sutil.IJobControl
diff --git a/pkg/controller/redisclusterbackup/sync_handler.go b/pkg/controller/redisclusterbackup/sync_handler.go
index f92357091..cd2ccf658 100644
--- a/pkg/controller/redisclusterbackup/sync_handler.go
+++ b/pkg/controller/redisclusterbackup/sync_handler.go
@@ -180,7 +180,7 @@ func (r *ReconcileRedisClusterBackup) create(reqLogger logr.Logger, backup *redi
 		return err
 	}

-	reqLogger.V(4).Info("Backup running")
+	reqLogger.Info("Backup running")
 	r.recorder.Event(
 		backup,
 		corev1.EventTypeNormal,
@@ -502,6 +502,7 @@ func (r *ReconcileRedisClusterBackup) handleBackupJob(reqLogger logr.Logger, bac
 	if jobSucceeded {
 		msg := "Successfully completed backup"
+		reqLogger.Info(msg)
 		r.recorder.Event(
 			backup,
 			corev1.EventTypeNormal,
@@ -516,6 +517,7 @@ func (r *ReconcileRedisClusterBackup) handleBackupJob(reqLogger logr.Logger, bac
 		)
 	} else {
 		msg := "Failed to complete backup"
+		reqLogger.Info(msg)
 		r.recorder.Event(
 			backup,
 			corev1.EventTypeWarning,
diff --git a/pkg/k8sutil/pod.go b/pkg/k8sutil/pod.go
new file mode 100644
index 000000000..0a3c1fc28
--- /dev/null
+++ b/pkg/k8sutil/pod.go
@@ -0,0 +1,66 @@
+package k8sutil
+
+import (
+	"context"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// IPodControl defines the interface used to create, update, and delete Pods.
+type IPodControl interface {
+	// CreatePod creates a Pod in a DistributedRedisCluster.
+	CreatePod(*corev1.Pod) error
+	// UpdatePod updates a Pod in a DistributedRedisCluster.
+	UpdatePod(*corev1.Pod) error
+	// DeletePod deletes a Pod in a DistributedRedisCluster.
+	DeletePod(*corev1.Pod) error
+	DeletePodByName(namespace, name string) error
+	// GetPod gets a Pod in a DistributedRedisCluster.
+	GetPod(namespace, name string) (*corev1.Pod, error)
+}
+
+type PodController struct {
+	client client.Client
+}
+
+// NewPodController creates a concrete implementation of the
+// IPodControl.
+func NewPodController(client client.Client) IPodControl {
+	return &PodController{client: client}
+}
+
+// CreatePod implements the IPodControl.Interface.
+func (p *PodController) CreatePod(pod *corev1.Pod) error {
+	return p.client.Create(context.TODO(), pod)
+}
+
+// UpdatePod implements the IPodControl.Interface.
+func (p *PodController) UpdatePod(pod *corev1.Pod) error {
+	return p.client.Update(context.TODO(), pod)
+}
+
+// DeletePod implements the IPodControl.Interface.
+func (p *PodController) DeletePod(pod *corev1.Pod) error {
+	return p.client.Delete(context.TODO(), pod)
+}
+
+// DeletePodByName implements the IPodControl.Interface.
+func (p *PodController) DeletePodByName(namespace, name string) error {
+	pod, err := p.GetPod(namespace, name)
+	if err != nil {
+		return err
+	}
+	return p.client.Delete(context.TODO(), pod)
+}
+
+// GetPod implements the IPodControl.Interface.
+func (p *PodController) GetPod(namespace, name string) (*corev1.Pod, error) {
+	pod := &corev1.Pod{}
+	err := p.client.Get(context.TODO(), types.NamespacedName{
+		Name:      name,
+		Namespace: namespace,
+	}, pod)
+	return pod, err
+}
diff --git a/pkg/redisutil/admin.go b/pkg/redisutil/admin.go
index 2e18a41d0..7807bacbc 100644
--- a/pkg/redisutil/admin.go
+++ b/pkg/redisutil/admin.go
@@ -52,11 +52,11 @@ type IAdmin interface {
 	// AttachSlaveToMaster attach a slave to a master node
 	AttachSlaveToMaster(slave *Node, masterID string) error
 	// DetachSlave detaches a slave from its master
-	//DetachSlave(slave *Node) error
+	DetachSlave(slave *Node) error
 	//// StartFailover execute the failover of the Redis Master corresponding to the addr
 	//StartFailover(addr string) error
-	//// ForgetNode execute the Redis command to force the cluster to forgot the the Node
-	//ForgetNode(id string) error
+	// ForgetNode executes the Redis command to force the cluster to forget the Node
+	ForgetNode(id string) error
 	//// ForgetNodeByAddr execute the Redis command to force the cluster to forget the Node
 	//ForgetNodeByAddr(id string) error
 	// SetSlots exec the redis command to set slots in a pipeline, provide
@@ -77,8 +77,8 @@ type IAdmin interface {
 	// MigrateKeys use to migrate keys from slot to other slot. if replace is true, replace key on busy error
 	// timeout is in milliseconds
 	MigrateKeysInSlot(addr string, dest *Node, slot Slot, batch int, timeout int, replace bool) (int, error)
-	//// FlushAndReset reset the cluster configuration of the node, the node is flushed in the same pipe to ensure reset works
-	//FlushAndReset(addr string, mode string) error
+	// FlushAndReset resets the cluster configuration of the node; the node is flushed in the same pipe to ensure the reset works
+	FlushAndReset(addr string, mode string) error
 	//// FlushAll flush all keys in cluster
 	//FlushAll()
 	// GetHashMaxSlot get the max slot value
@@ -132,6 +132,7 @@ func (a *Admin) GetClusterInfos() (*ClusterInfos, error) {
 	for addr, c := range a.Connections().GetAll() {
 		nodeinfos, err := a.getInfos(c, addr)
 		if err != nil {
+			log.WithValues("err", err).Info("get redis info failed")
 			infos.Status = ClusterInfosPartial
 			clusterErr.partial = true
 			clusterErr.errs[addr] = err
@@ -480,3 +481,69 @@ func (a *Admin) MigrateKeysInSlot(addr string, dest *Node, slot Slot, batch int,

 	return keyCount, nil
 }
+
+// ForgetNode is used to force the other redis cluster nodes to forget a specific node
+func (a *Admin) ForgetNode(id string) error {
+	infos, _ := a.GetClusterInfos()
+	for nodeAddr, nodeinfos := range infos.Infos {
+		if nodeinfos.Node.ID == id {
+			continue
+		}
+		c, err := a.Connections().Get(nodeAddr)
+		if err != nil {
+			log.Error(err, fmt.Sprintf("cannot force a forget on node %s, for node %s", nodeAddr, id))
+			continue
+		}
+
+		if IsSlave(nodeinfos.Node) && nodeinfos.Node.MasterReferent == id {
+			a.DetachSlave(nodeinfos.Node)
+			log.Info(fmt.Sprintf("detach slave id: %s of master: %s", nodeinfos.Node.ID, id))
+		}
+
+		resp := c.Cmd("CLUSTER", "FORGET", id)
+		a.Connections().ValidateResp(resp, nodeAddr, "Unable to execute FORGET command")
+	}
+
+	log.Info("Forget node done", "node", id)
+	return nil
+}
+
+// DetachSlave is used to detach a slave from its master
+func (a *Admin) DetachSlave(slave *Node) error {
+	c, err := a.Connections().Get(slave.IPPort())
+	if err != nil {
+		log.Error(err, fmt.Sprintf("unable to get the connection for slave ID:%s, addr:%s", slave.ID, slave.IPPort()))
+		return err
+	}
+
+	resp := c.Cmd("CLUSTER", "RESET", "SOFT")
+	if err = a.Connections().ValidateResp(resp, slave.IPPort(), "Cannot reset slave node"); err != nil {
+		return err
+	}
+
+	if err = a.AttachNodeToCluster(slave.IPPort()); err != nil {
+		log.Error(err, fmt.Sprintf("[DetachSlave] unable to AttachNodeToCluster the Slave id: %s addr:%s", slave.ID, slave.IPPort()))
+		return err
+	}
+
+	slave.SetReferentMaster("")
+	slave.SetRole(RedisMasterRole)
+
+	return nil
+}
+
+// FlushAndReset flushes the node and resets its cluster configuration; commands are piped to ensure no items arrive between the flush and the reset
+func (a *Admin) FlushAndReset(addr string, mode string) error {
+	c, err := a.Connections().Get(addr)
+	if err != nil {
+		return err
+	}
+	c.PipeAppend("FLUSHALL")
+	c.PipeAppend("CLUSTER", "RESET", mode)
+
+	if !a.Connections().ValidatePipeResp(c, addr, "Cannot reset node") {
+		return fmt.Errorf("Cannot reset node %s", addr)
+	}
+
+	return nil
+}
diff --git a/pkg/resources/services/service.go b/pkg/resources/services/service.go
index 4df4ec3d4..249862038 100644
--- a/pkg/resources/services/service.go
+++ b/pkg/resources/services/service.go
@@ -7,7 +7,7 @@ import (
 	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
 )

-// NewStatefulSetForCR creates a new StatefulSet for the given Cluster.
+// NewHeadLessSvcForCR creates a new headless service for the given Cluster.
 func NewHeadLessSvcForCR(cluster *redisv1alpha1.DistributedRedisCluster, labels map[string]string) *corev1.Service {
 	clientPort := corev1.ServicePort{Name: "client", Port: 6379}
 	gossipPort := corev1.ServicePort{Name: "gossip", Port: 16379}
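For review convenience, the reconcile flow after this change, condensed into comments. This is an illustrative summary of the controller code above, not an exhaustive trace; error branches and intermediate requeues are elided.

// Reconcile, post-patch:
//  1. ensureCluster(ctx): validate the spec and apply the StatefulSet/services
//  2. build ctx.pods and ctx.healer: clusterPods(...) + NewHealer(&heal.CheckAndHeal{...})
//  3. waitPodReady(ctx): FixTerminatingPods(5*time.Minute), then CheckRedisNodeNum
//  4. newRedisAdmin(ctx.pods, ...): connect via the "client" port of the "redis" container
//  5. healer.Heal(...): forget failed/untrusted nodes, fix a cluster split if detected
//  6. waitForClusterJoin(ctx): CLUSTER MEET through the first node when infos disagree
//  7. buildClusterStatus(...): now also fills Min/MaxReplicationFactor
//  8. needClusterOperation(...) triggers sync(ctx): dispatch masters, place slaves, alloc slots
//  9. refresh the status and requeue after requeueEnsure (60s)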