diff --git a/Makefile b/Makefile index 6fb0cb6f8..bfa8158b1 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ builder-image: builder-test: @echo Running unit tests for splunk-operator inside of builder container - @docker run -v /var/run/docker.sock:/var/run/docker.sock -v ${PWD}:/opt/app-root/src/splunk-operator -w /opt/app-root/src/splunk-operator -u root -it splunk/splunk-operator-builder bash -c "go test -v -covermode=count -coverprofile=coverage.out --timeout=300s github.com/splunk/splunk-operator/pkg/splunk/resources github.com/splunk/splunk-operator/pkg/splunk/spark github.com/splunk/splunk-operator/pkg/splunk/enterprise github.com/splunk/splunk-operator/pkg/splunk/reconcile" + @docker run -v /var/run/docker.sock:/var/run/docker.sock -v ${PWD}:/opt/app-root/src/splunk-operator -w /opt/app-root/src/splunk-operator -u root -it splunk/splunk-operator-builder bash -c "go test -v -covermode=count -coverprofile=coverage.out --timeout=300s github.com/splunk/splunk-operator/pkg/splunk/resources github.com/splunk/splunk-operator/pkg/splunk/spark github.com/splunk/splunk-operator/pkg/splunk/enterprise github.com/splunk/splunk-operator/pkg/splunk/reconcile github.com/splunk/splunk-operator/pkg/splunk/client" image: @echo Building splunk-operator image @@ -41,7 +41,7 @@ local: test: @echo Running unit tests for splunk-operator - @go test -v -covermode=count -coverprofile=coverage.out --timeout=300s github.com/splunk/splunk-operator/pkg/splunk/resources github.com/splunk/splunk-operator/pkg/splunk/spark github.com/splunk/splunk-operator/pkg/splunk/enterprise github.com/splunk/splunk-operator/pkg/splunk/reconcile + @go test -v -covermode=count -coverprofile=coverage.out --timeout=300s github.com/splunk/splunk-operator/pkg/splunk/resources github.com/splunk/splunk-operator/pkg/splunk/spark github.com/splunk/splunk-operator/pkg/splunk/enterprise github.com/splunk/splunk-operator/pkg/splunk/reconcile github.com/splunk/splunk-operator/pkg/splunk/client 
stop_clair_scanner: @docker stop clair_db || true @@ -77,9 +77,9 @@ generate: @echo "---" >> deploy/crds/combined.yaml @cat deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml >> deploy/crds/combined.yaml @echo "---" >> deploy/crds/combined.yaml - @cat deploy/crds/enterprise.splunk.com_searchheads_crd.yaml >> deploy/crds/combined.yaml + @cat deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml >> deploy/crds/combined.yaml @echo "---" >> deploy/crds/combined.yaml - @cat deploy/crds/enterprise.splunk.com_indexers_crd.yaml >> deploy/crds/combined.yaml + @cat deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml >> deploy/crds/combined.yaml @echo "---" >> deploy/crds/combined.yaml @cat deploy/crds/enterprise.splunk.com_sparks_crd.yaml >> deploy/crds/combined.yaml @echo Rebuilding deploy/all-in-one-scoped.yaml diff --git a/README.md b/README.md index 787e670fc..a3380384f 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,8 @@ This repository consists of the following code used to build the splunk-operator * `pkg/splunk/enterprise/`: Source code for managing Splunk Enterprise deployments * `pkg/splunk/spark/`: Source code for managing Spark cluster deployments * `pkg/splunk/resources/`: Generic utility code used by other splunk modules +* `pkg/splunk/client/`: Simple client for Splunk Enterprise REST API +* `pkg/splunk/test/`: Common code used for testing other modules `main()` basically just instantiates the `controllers`, and the `controllers` call into the `reconcile` module to perform actions. 
The `reconcile` module uses the `enterprise` diff --git a/build/Dockerfile b/build/Dockerfile index 38f42143c..c96f5163d 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -16,10 +16,10 @@ LABEL name="splunk" \ COPY build/_output/bin/splunk-operator ${OPERATOR} COPY build/bin /usr/local/bin -RUN mkdir /licenses \ - && curl -o /licenses/apache-2.0.txt https://www.apache.org/licenses/LICENSE-2.0.txt \ - && curl -o /licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf https://www.redhat.com/licenses/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf \ - && /usr/local/bin/user_setup +RUN mkdir /licenses && /usr/local/bin/user_setup + +COPY build/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf /licenses +COPY LICENSE /licenses/LICENSE-2.0.txt ENTRYPOINT ["/usr/local/bin/entrypoint"] diff --git a/build/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf b/build/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf new file mode 100644 index 000000000..3a32abd75 Binary files /dev/null and b/build/EULA_Red_Hat_Universal_Base_Image_English_20190422.pdf differ diff --git a/deploy/all-in-one-cluster.yaml b/deploy/all-in-one-cluster.yaml index 7525dc2b6..f3b7587c3 100644 --- a/deploy/all-in-one-cluster.yaml +++ b/deploy/all-in-one-cluster.yaml @@ -5,14 +5,22 @@ metadata: name: standalones.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of standalone instances name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of standalone instances - name: Instances type: string + - JSONPath: .status.replicas + description: Number of desired standalone instances + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready standalone instances + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of standalone resource + name: Age + type: date group: 
enterprise.splunk.com names: kind: Standalone @@ -21,6 +29,10 @@ spec: singular: standalone scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ -628,7 +640,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -638,9 +650,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. @@ -717,6 +729,7 @@ spec: type: string replicas: description: Number of standalone pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -979,7 +992,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -2211,17 +2224,27 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. 
properties: - instances: - description: current number of standalone instances - type: integer phase: description: current phase of the standalone instances enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready standalone instances + format: int32 + type: integer + replicas: + description: number of desired standalone instances + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -2237,10 +2260,14 @@ metadata: name: licensemasters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of license master name: Phase - type: integer + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of license master + name: Age + type: date group: enterprise.splunk.com names: kind: LicenseMaster @@ -2858,7 +2885,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -2868,9 +2895,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -4399,9 +4426,13 @@ spec: phase: description: current phase of the license master enum: - - pending - - ready - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string type: object type: object @@ -4414,33 +4445,48 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: searchheads.enterprise.splunk.com + name: searchheadclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of search head cluster name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of search heads - name: Instances type: string + - JSONPath: .status.deployerPhase + description: Status of the deployer + name: Deployer + type: string + - JSONPath: .status.replicas + description: Desired number of search head cluster members + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready search head cluster members + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of search head cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: SearchHead - listKind: SearchHeadList - plural: searchheads + kind: SearchHeadCluster + listKind: SearchHeadClusterList + plural: searchheadclusters shortNames: - - search - - sh - singular: searchhead + - shc + singular: searchheadcluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: SearchHead is the Schema for a Splunk Enterprise standalone search - head or cluster of search heads + description: SearchHeadCluster is the Schema for a Splunk Enterprise search + head cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -4455,8 
+4501,8 @@ spec: metadata: type: object spec: - description: SearchHeadSpec defines the desired state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterSpec defines the desired state of a Splunk + Enterprise search head cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -5044,7 +5090,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -5054,9 +5100,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -5134,6 +5180,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -5396,7 +5443,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -6625,20 +6672,86 @@ spec: type: array type: object status: - description: SearchHeadStatus defines the observed state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterStatus defines the observed state of a Splunk + Enterprise search head cluster properties: - instances: - description: current number of search head instances - type: integer + captain: + description: name or label of the search head captain + type: string + captainReady: + description: true if the search head cluster's captain is ready to service + requests + type: boolean + deployerPhase: + description: current phase of the deployer + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + initialized: + description: true if the search head cluster has finished initialization + type: boolean + maintenanceMode: + description: true if the search head cluster is in maintenance mode + type: boolean + members: + description: status of each search head cluster member + items: + description: SearchHeadClusterMemberStatus is used to track the status + of each search head cluster member + properties: + active_historical_search_count: + description: Number of currently running historical searches. + type: integer + active_realtime_search_count: + description: Number of currently running realtime searches. 
+ type: integer + adhoc_searchhead: + description: Flag that indicates if this member can run scheduled + searches. + type: boolean + is_registered: + description: Indicates if this member is registered with the searchhead + cluster captain. + type: boolean + name: + description: Name of the search head cluster member + type: string + status: + description: Indicates the status of the member. + type: string + type: object + type: array + minPeersJoined: + description: true if the minimum number of search head cluster members + have joined + type: boolean phase: description: current phase of the search head cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready search head cluster members + format: int32 + type: integer + replicas: + description: desired number of search head cluster members + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -6651,36 +6764,48 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: indexers.enterprise.splunk.com + name: indexerclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of indexer cluster name: Phase - type: integer - - JSONPath: .spec.status.clusterMasterPhase - description: Status of cluster master - name: CM type: string - - JSONPath: .spec.status.instances - description: Number of indexers - name: Instances + - JSONPath: .status.clusterMasterPhase + description: Status of cluster master + name: Master type: string + - JSONPath: .status.replicas + description: Desired number of indexer peers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready indexer peers + 
name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of indexer cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: Indexer - listKind: IndexerList - plural: indexers + kind: IndexerCluster + listKind: IndexerClusterList + plural: indexerclusters shortNames: - - idx - singular: indexer + - idc + - idxc + singular: indexercluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: Indexer is the Schema for a Splunk Enterprise standalone indexer - or cluster of indexers + description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -6695,8 +6820,8 @@ spec: metadata: type: object spec: - description: IndexerSpec defines the desired state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterSpec defines the desired state of a Splunk Enterprise + indexer cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -7284,7 +7409,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -7294,9 +7419,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -7374,6 +7499,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -8823,30 +8949,85 @@ spec: type: array type: object status: - description: IndexerStatus defines the observed state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterStatus defines the observed state of a Splunk + Enterprise indexer cluster properties: clusterMasterPhase: description: current phase of the cluster master enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string - instances: - description: current number of indexer instances - type: integer + indexing_ready_flag: + description: Indicates if the cluster is ready for indexing. + type: boolean + initialized_flag: + description: Indicates if the cluster is initialized. + type: boolean + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean + peers: + description: status of each indexer cluster peer + items: + description: IndexerClusterMemberStatus is used to track the status + of each indexer cluster peer. + properties: + active_bundle_id: + description: The ID of the configuration bundle currently being + used by the master. + type: string + bucket_count: + description: Count of the number of buckets on this peer, across + all indexes. + format: int64 + type: integer + guid: + description: Unique identifier or GUID for the peer + type: string + is_searchable: + description: Flag indicating if this peer belongs to the current + committed generation and is searchable. 
+ type: boolean + name: + description: Name of the indexer cluster peer + type: string + status: + description: Status of the indexer cluster peer + type: string + type: object + type: array phase: description: current phase of the indexer cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string + readyReplicas: + description: current number of ready indexer peers + format: int32 + type: integer + replicas: + description: desired number of indexer peers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + service_ready_flag: + description: Indicates whether the master is ready to begin servicing, + based on whether it is initialized. + type: boolean type: object type: object version: v1alpha2 @@ -8861,14 +9042,26 @@ metadata: name: sparks.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase - description: Status of Spark cluster + - JSONPath: .status.phase + description: Status of Spark workers name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of Spark workers - name: Instances type: string + - JSONPath: .status.masterPhase + description: Status of Spark master + name: Master + type: string + - JSONPath: .status.replicas + description: Number of desired Spark workers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready Spark workers + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of Spark cluster + name: Age + type: date group: enterprise.splunk.com names: kind: Spark @@ -8877,6 +9070,10 @@ spec: singular: spark scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ 
-9471,7 +9668,7 @@ spec: type: object type: object image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -9483,6 +9680,7 @@ spec: type: string replicas: description: Number of spark worker pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -9748,17 +9946,38 @@ spec: status: description: SparkStatus defines the observed state of a Spark cluster properties: - instances: - description: current number of spark worker instances - type: integer + masterPhase: + description: current phase of the spark master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string phase: - description: current phase of the spark cluster + description: current phase of the spark workers enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready spark workers + format: int32 + type: integer + replicas: + description: number of desired spark workers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -9903,7 +10122,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "splunk-operator" - - name: SPLUNK_IMAGE - value: "splunk/splunk:8.0" - - name: SPARK_IMAGE + - name: RELATED_IMAGE_SPLUNK_ENTERPRISE + value: "splunk/splunk:edge" + - name: RELATED_IMAGE_SPLUNK_SPARK value: "splunk/spark" diff --git a/deploy/all-in-one-scoped.yaml b/deploy/all-in-one-scoped.yaml index 78e166b3d..9dbe0c264 100644 --- a/deploy/all-in-one-scoped.yaml +++ b/deploy/all-in-one-scoped.yaml @@ -5,14 +5,22 @@ metadata: name: 
standalones.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of standalone instances name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of standalone instances - name: Instances type: string + - JSONPath: .status.replicas + description: Number of desired standalone instances + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready standalone instances + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of standalone resource + name: Age + type: date group: enterprise.splunk.com names: kind: Standalone @@ -21,6 +29,10 @@ spec: singular: standalone scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ -628,7 +640,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -638,9 +650,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -717,6 +729,7 @@ spec: type: string replicas: description: Number of standalone pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -979,7 +992,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -2211,17 +2224,27 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. properties: - instances: - description: current number of standalone instances - type: integer phase: description: current phase of the standalone instances enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready standalone instances + format: int32 + type: integer + replicas: + description: number of desired standalone instances + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -2237,10 +2260,14 @@ metadata: name: licensemasters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of license master name: Phase - type: integer + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of license master + name: Age + type: date group: enterprise.splunk.com names: kind: LicenseMaster @@ -2858,7 +2885,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -2868,9 +2895,9 @@ spec: - 
Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. @@ -4399,9 +4426,13 @@ spec: phase: description: current phase of the license master enum: - - pending - - ready - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string type: object type: object @@ -4414,33 +4445,48 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: searchheads.enterprise.splunk.com + name: searchheadclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of search head cluster name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of search heads - name: Instances type: string + - JSONPath: .status.deployerPhase + description: Status of the deployer + name: Deployer + type: string + - JSONPath: .status.replicas + description: Desired number of search head cluster members + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready search head cluster members + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of search head cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: SearchHead - listKind: SearchHeadList - plural: searchheads + kind: SearchHeadCluster + listKind: SearchHeadClusterList + plural: searchheadclusters shortNames: - - search - - sh - singular: searchhead + - shc + singular: searchheadcluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: 
.status.replicas status: {} validation: openAPIV3Schema: - description: SearchHead is the Schema for a Splunk Enterprise standalone search - head or cluster of search heads + description: SearchHeadCluster is the Schema for a Splunk Enterprise search + head cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -4455,8 +4501,8 @@ spec: metadata: type: object spec: - description: SearchHeadSpec defines the desired state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterSpec defines the desired state of a Splunk + Enterprise search head cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -5044,7 +5090,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -5054,9 +5100,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -5134,6 +5180,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -5396,7 +5443,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -6625,20 +6672,86 @@ spec: type: array type: object status: - description: SearchHeadStatus defines the observed state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterStatus defines the observed state of a Splunk + Enterprise search head cluster properties: - instances: - description: current number of search head instances - type: integer + captain: + description: name or label of the search head captain + type: string + captainReady: + description: true if the search head cluster's captain is ready to service + requests + type: boolean + deployerPhase: + description: current phase of the deployer + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + initialized: + description: true if the search head cluster has finished initialization + type: boolean + maintenanceMode: + description: true if the search head cluster is in maintenance mode + type: boolean + members: + description: status of each search head cluster member + items: + description: SearchHeadClusterMemberStatus is used to track the status + of each search head cluster member + properties: + active_historical_search_count: + description: Number of currently running historical searches. + type: integer + active_realtime_search_count: + description: Number of currently running realtime searches. 
+ type: integer + adhoc_searchhead: + description: Flag that indicates if this member can run scheduled + searches. + type: boolean + is_registered: + description: Indicates if this member is registered with the searchhead + cluster captain. + type: boolean + name: + description: Name of the search head cluster member + type: string + status: + description: Indicates the status of the member. + type: string + type: object + type: array + minPeersJoined: + description: true if the minimum number of search head cluster members + have joined + type: boolean phase: description: current phase of the search head cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready search head cluster members + format: int32 + type: integer + replicas: + description: desired number of search head cluster members + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -6651,36 +6764,48 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: indexers.enterprise.splunk.com + name: indexerclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of indexer cluster name: Phase - type: integer - - JSONPath: .spec.status.clusterMasterPhase - description: Status of cluster master - name: CM type: string - - JSONPath: .spec.status.instances - description: Number of indexers - name: Instances + - JSONPath: .status.clusterMasterPhase + description: Status of cluster master + name: Master type: string + - JSONPath: .status.replicas + description: Desired number of indexer peers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready indexer peers + 
name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of indexer cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: Indexer - listKind: IndexerList - plural: indexers + kind: IndexerCluster + listKind: IndexerClusterList + plural: indexerclusters shortNames: - - idx - singular: indexer + - idc + - idxc + singular: indexercluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: Indexer is the Schema for a Splunk Enterprise standalone indexer - or cluster of indexers + description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -6695,8 +6820,8 @@ spec: metadata: type: object spec: - description: IndexerSpec defines the desired state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterSpec defines the desired state of a Splunk Enterprise + indexer cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -7284,7 +7409,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -7294,9 +7419,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -7374,6 +7499,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -8823,30 +8949,85 @@ spec: type: array type: object status: - description: IndexerStatus defines the observed state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterStatus defines the observed state of a Splunk + Enterprise indexer cluster properties: clusterMasterPhase: description: current phase of the cluster master enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string - instances: - description: current number of indexer instances - type: integer + indexing_ready_flag: + description: Indicates if the cluster is ready for indexing. + type: boolean + initialized_flag: + description: Indicates if the cluster is initialized. + type: boolean + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean + peers: + description: status of each indexer cluster peer + items: + description: IndexerClusterMemberStatus is used to track the status + of each indexer cluster peer. + properties: + active_bundle_id: + description: The ID of the configuration bundle currently being + used by the master. + type: string + bucket_count: + description: Count of the number of buckets on this peer, across + all indexes. + format: int64 + type: integer + guid: + description: Unique identifier or GUID for the peer + type: string + is_searchable: + description: Flag indicating if this peer belongs to the current + committed generation and is searchable. 
+ type: boolean + name: + description: Name of the indexer cluster peer + type: string + status: + description: Status of the indexer cluster peer + type: string + type: object + type: array phase: description: current phase of the indexer cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string + readyReplicas: + description: current number of ready indexer peers + format: int32 + type: integer + replicas: + description: desired number of indexer peers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + service_ready_flag: + description: Indicates whether the master is ready to begin servicing, + based on whether it is initialized. + type: boolean type: object type: object version: v1alpha2 @@ -8861,14 +9042,26 @@ metadata: name: sparks.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase - description: Status of Spark cluster + - JSONPath: .status.phase + description: Status of Spark workers name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of Spark workers - name: Instances type: string + - JSONPath: .status.masterPhase + description: Status of Spark master + name: Master + type: string + - JSONPath: .status.replicas + description: Number of desired Spark workers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready Spark workers + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of Spark cluster + name: Age + type: date group: enterprise.splunk.com names: kind: Spark @@ -8877,6 +9070,10 @@ spec: singular: spark scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ 
-9471,7 +9668,7 @@ spec: type: object type: object image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -9483,6 +9680,7 @@ spec: type: string replicas: description: Number of spark worker pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -9748,17 +9946,38 @@ spec: status: description: SparkStatus defines the observed state of a Spark cluster properties: - instances: - description: current number of spark worker instances - type: integer + masterPhase: + description: current phase of the spark master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string phase: - description: current phase of the spark cluster + description: current phase of the spark workers enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready spark workers + format: int32 + type: integer + replicas: + description: number of desired spark workers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -9883,7 +10102,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "splunk-operator" - - name: SPLUNK_IMAGE - value: "splunk/splunk:8.0" - - name: SPARK_IMAGE + - name: RELATED_IMAGE_SPLUNK_ENTERPRISE + value: "splunk/splunk:edge" + - name: RELATED_IMAGE_SPLUNK_SPARK value: "splunk/spark" diff --git a/deploy/all-in-one.yaml b/deploy/all-in-one.yaml deleted file mode 100644 index 76884125c..000000000 --- a/deploy/all-in-one.yaml +++ /dev/null @@ -1,143 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: 
CustomResourceDefinition -metadata: - labels: - k8s-app: splunk-kubecontroller - name: splunkenterprises.enterprise.splunk.com -spec: - group: enterprise.splunk.com - names: - kind: SplunkEnterprise - listKind: SplunkEnterpriseList - plural: splunkenterprises - shortNames: - - enterprise - singular: splunkenterprise - scope: Namespaced - version: v1alpha1 ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - rbac.authorization.k8s.io/aggregate-to-admin: "true" - name: splunk:splunk-enterprise-operator -rules: - - apiGroups: - - enterprise.splunk.com - resources: - - '*' - verbs: - - '*' ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: splunk-operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - creationTimestamp: null - name: splunk-operator -rules: -- apiGroups: - - "" - resources: - - services - - endpoints - - persistentvolumeclaims - - configmaps - - secrets - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - watch -- apiGroups: - - apps - resources: - - deployments - - daemonsets - - replicasets - - statefulsets - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - enterprise.splunk.com - resources: - - '*' - verbs: - - '*' ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: splunk-operator -subjects: -- kind: ServiceAccount - name: splunk-operator -roleRef: - kind: Role - name: splunk-operator - apiGroup: rbac.authorization.k8s.io ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: splunk-operator -spec: - replicas: 1 - selector: - matchLabels: - name: splunk-operator - template: - metadata: - labels: - name: splunk-operator - spec: - serviceAccountName: splunk-operator - containers: - - name: splunk-operator - image: splunk/splunk-operator - ports: - - 
containerPort: 60000 - name: metrics - command: - - splunk-operator - imagePullPolicy: IfNotPresent - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: "splunk-operator" - - name: SPLUNK_IMAGE - value: "splunk/splunk:8.0" - - name: SPARK_IMAGE - value: "splunk/spark" diff --git a/deploy/cluster_operator.yaml b/deploy/cluster_operator.yaml index 2938a7dfb..e1564e861 100644 --- a/deploy/cluster_operator.yaml +++ b/deploy/cluster_operator.yaml @@ -66,7 +66,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "splunk-operator" - - name: SPLUNK_IMAGE - value: "splunk/splunk:8.0" - - name: SPARK_IMAGE + - name: RELATED_IMAGE_SPLUNK_ENTERPRISE + value: "splunk/splunk:edge" + - name: RELATED_IMAGE_SPLUNK_SPARK value: "splunk/spark" diff --git a/deploy/crds/combined.yaml b/deploy/crds/combined.yaml index 641cfc0d3..5e59c5eb0 100644 --- a/deploy/crds/combined.yaml +++ b/deploy/crds/combined.yaml @@ -5,14 +5,22 @@ metadata: name: standalones.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of standalone instances name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of standalone instances - name: Instances type: string + - JSONPath: .status.replicas + description: Number of desired standalone instances + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready standalone instances + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of standalone resource + name: Age + type: date group: enterprise.splunk.com names: kind: Standalone @@ -21,6 +29,10 @@ spec: singular: standalone scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: 
.status.replicas status: {} validation: openAPIV3Schema: @@ -628,7 +640,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -638,9 +650,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. @@ -717,6 +729,7 @@ spec: type: string replicas: description: Number of standalone pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -979,7 +992,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -2211,17 +2224,27 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. 
properties: - instances: - description: current number of standalone instances - type: integer phase: description: current phase of the standalone instances enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready standalone instances + format: int32 + type: integer + replicas: + description: number of desired standalone instances + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -2237,10 +2260,14 @@ metadata: name: licensemasters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of license master name: Phase - type: integer + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of license master + name: Age + type: date group: enterprise.splunk.com names: kind: LicenseMaster @@ -2858,7 +2885,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -2868,9 +2895,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -4399,9 +4426,13 @@ spec: phase: description: current phase of the license master enum: - - pending - - ready - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string type: object type: object @@ -4414,33 +4445,48 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: searchheads.enterprise.splunk.com + name: searchheadclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of search head cluster name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of search heads - name: Instances type: string + - JSONPath: .status.deployerPhase + description: Status of the deployer + name: Deployer + type: string + - JSONPath: .status.replicas + description: Desired number of search head cluster members + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready search head cluster members + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of search head cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: SearchHead - listKind: SearchHeadList - plural: searchheads + kind: SearchHeadCluster + listKind: SearchHeadClusterList + plural: searchheadclusters shortNames: - - search - - sh - singular: searchhead + - shc + singular: searchheadcluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: SearchHead is the Schema for a Splunk Enterprise standalone search - head or cluster of search heads + description: SearchHeadCluster is the Schema for a Splunk Enterprise search + head cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -4455,8 
+4501,8 @@ spec: metadata: type: object spec: - description: SearchHeadSpec defines the desired state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterSpec defines the desired state of a Splunk + Enterprise search head cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -5044,7 +5090,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -5054,9 +5100,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -5134,6 +5180,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -5396,7 +5443,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -6625,20 +6672,86 @@ spec: type: array type: object status: - description: SearchHeadStatus defines the observed state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterStatus defines the observed state of a Splunk + Enterprise search head cluster properties: - instances: - description: current number of search head instances - type: integer + captain: + description: name or label of the search head captain + type: string + captainReady: + description: true if the search head cluster's captain is ready to service + requests + type: boolean + deployerPhase: + description: current phase of the deployer + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + initialized: + description: true if the search head cluster has finished initialization + type: boolean + maintenanceMode: + description: true if the search head cluster is in maintenance mode + type: boolean + members: + description: status of each search head cluster member + items: + description: SearchHeadClusterMemberStatus is used to track the status + of each search head cluster member + properties: + active_historical_search_count: + description: Number of currently running historical searches. + type: integer + active_realtime_search_count: + description: Number of currently running realtime searches. 
+ type: integer + adhoc_searchhead: + description: Flag that indicates if this member can run scheduled + searches. + type: boolean + is_registered: + description: Indicates if this member is registered with the searchhead + cluster captain. + type: boolean + name: + description: Name of the search head cluster member + type: string + status: + description: Indicates the status of the member. + type: string + type: object + type: array + minPeersJoined: + description: true if the minimum number of search head cluster members + have joined + type: boolean phase: description: current phase of the search head cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready search head cluster members + format: int32 + type: integer + replicas: + description: desired number of search head cluster members + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object @@ -6651,36 +6764,48 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: indexers.enterprise.splunk.com + name: indexerclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of indexer cluster name: Phase - type: integer - - JSONPath: .spec.status.clusterMasterPhase - description: Status of cluster master - name: CM type: string - - JSONPath: .spec.status.instances - description: Number of indexers - name: Instances + - JSONPath: .status.clusterMasterPhase + description: Status of cluster master + name: Master type: string + - JSONPath: .status.replicas + description: Desired number of indexer peers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready indexer peers + 
name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of indexer cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: Indexer - listKind: IndexerList - plural: indexers + kind: IndexerCluster + listKind: IndexerClusterList + plural: indexerclusters shortNames: - - idx - singular: indexer + - idc + - idxc + singular: indexercluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: Indexer is the Schema for a Splunk Enterprise standalone indexer - or cluster of indexers + description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -6695,8 +6820,8 @@ spec: metadata: type: object spec: - description: IndexerSpec defines the desired state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterSpec defines the desired state of a Splunk Enterprise + indexer cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -7284,7 +7409,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -7294,9 +7419,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -7374,6 +7499,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -8823,30 +8949,85 @@ spec: type: array type: object status: - description: IndexerStatus defines the observed state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterStatus defines the observed state of a Splunk + Enterprise indexer cluster properties: clusterMasterPhase: description: current phase of the cluster master enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string - instances: - description: current number of indexer instances - type: integer + indexing_ready_flag: + description: Indicates if the cluster is ready for indexing. + type: boolean + initialized_flag: + description: Indicates if the cluster is initialized. + type: boolean + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean + peers: + description: status of each indexer cluster peer + items: + description: IndexerClusterMemberStatus is used to track the status + of each indexer cluster peer. + properties: + active_bundle_id: + description: The ID of the configuration bundle currently being + used by the master. + type: string + bucket_count: + description: Count of the number of buckets on this peer, across + all indexes. + format: int64 + type: integer + guid: + description: Unique identifier or GUID for the peer + type: string + is_searchable: + description: Flag indicating if this peer belongs to the current + committed generation and is searchable. 
+ type: boolean + name: + description: Name of the indexer cluster peer + type: string + status: + description: Status of the indexer cluster peer + type: string + type: object + type: array phase: description: current phase of the indexer cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string + readyReplicas: + description: current number of ready indexer peers + format: int32 + type: integer + replicas: + description: desired number of indexer peers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + service_ready_flag: + description: Indicates whether the master is ready to begin servicing, + based on whether it is initialized. + type: boolean type: object type: object version: v1alpha2 @@ -8861,14 +9042,26 @@ metadata: name: sparks.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase - description: Status of Spark cluster + - JSONPath: .status.phase + description: Status of Spark workers name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of Spark workers - name: Instances type: string + - JSONPath: .status.masterPhase + description: Status of Spark master + name: Master + type: string + - JSONPath: .status.replicas + description: Number of desired Spark workers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready Spark workers + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of Spark cluster + name: Age + type: date group: enterprise.splunk.com names: kind: Spark @@ -8877,6 +9070,10 @@ spec: singular: spark scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ 
-9471,7 +9668,7 @@ spec: type: object type: object image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -9483,6 +9680,7 @@ spec: type: string replicas: description: Number of spark worker pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -9748,17 +9946,38 @@ spec: status: description: SparkStatus defines the observed state of a Spark cluster properties: - instances: - description: current number of spark worker instances - type: integer + masterPhase: + description: current phase of the spark master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string phase: - description: current phase of the spark cluster + description: current phase of the spark workers enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready spark workers + format: int32 + type: integer + replicas: + description: number of desired spark workers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object diff --git a/deploy/crds/enterprise.splunk.com_indexers_crd.yaml b/deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml similarity index 97% rename from deploy/crds/enterprise.splunk.com_indexers_crd.yaml rename to deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml index 70b60c7a3..d0c6cb144 100644 --- a/deploy/crds/enterprise.splunk.com_indexers_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml @@ -1,36 +1,48 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: 
indexers.enterprise.splunk.com + name: indexerclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of indexer cluster name: Phase - type: integer - - JSONPath: .spec.status.clusterMasterPhase - description: Status of cluster master - name: CM type: string - - JSONPath: .spec.status.instances - description: Number of indexers - name: Instances + - JSONPath: .status.clusterMasterPhase + description: Status of cluster master + name: Master type: string + - JSONPath: .status.replicas + description: Desired number of indexer peers + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready indexer peers + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of indexer cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: Indexer - listKind: IndexerList - plural: indexers + kind: IndexerCluster + listKind: IndexerClusterList + plural: indexerclusters shortNames: - - idx - singular: indexer + - idc + - idxc + singular: indexercluster scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: Indexer is the Schema for a Splunk Enterprise standalone indexer - or cluster of indexers + description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -45,8 +57,8 @@ spec: metadata: type: object spec: - description: IndexerSpec defines the desired state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterSpec defines the desired state of a Splunk Enterprise + indexer cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ 
-634,7 +646,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -644,9 +656,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. @@ -724,6 +736,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -2173,30 +2186,85 @@ spec: type: array type: object status: - description: IndexerStatus defines the observed state of a Splunk Enterprise - standalone indexer or cluster of indexers + description: IndexerClusterStatus defines the observed state of a Splunk + Enterprise indexer cluster properties: clusterMasterPhase: description: current phase of the cluster master enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string - instances: - description: current number of indexer instances - type: integer + indexing_ready_flag: + description: Indicates if the cluster is ready for indexing. + type: boolean + initialized_flag: + description: Indicates if the cluster is initialized. + type: boolean + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. 
+ type: boolean + peers: + description: status of each indexer cluster peer + items: + description: IndexerClusterMemberStatus is used to track the status + of each indexer cluster peer. + properties: + active_bundle_id: + description: The ID of the configuration bundle currently being + used by the master. + type: string + bucket_count: + description: Count of the number of buckets on this peer, across + all indexes. + format: int64 + type: integer + guid: + description: Unique identifier or GUID for the peer + type: string + is_searchable: + description: Flag indicating if this peer belongs to the current + committed generation and is searchable. + type: boolean + name: + description: Name of the indexer cluster peer + type: string + status: + description: Status of the indexer cluster peer + type: string + type: object + type: array phase: description: current phase of the indexer cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready indexer peers + format: int32 + type: integer + replicas: + description: desired number of indexer peers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string + service_ready_flag: + description: Indicates whether the master is ready to begin servicing, + based on whether it is initialized. 
+ type: boolean type: object type: object version: v1alpha2 diff --git a/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml index f9ec6e20d..7446c19be 100644 --- a/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml @@ -4,10 +4,14 @@ metadata: name: licensemasters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of license master name: Phase - type: integer + type: string + - JSONPath: .metadata.creationTimestamp + description: Age of license master + name: Age + type: date group: enterprise.splunk.com names: kind: LicenseMaster @@ -625,7 +629,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -635,9 +639,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -2166,9 +2170,13 @@ spec: phase: description: current phase of the license master enum: - - pending - - ready - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error type: string type: object type: object diff --git a/deploy/crds/enterprise.splunk.com_searchheads_crd.yaml b/deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml similarity index 96% rename from deploy/crds/enterprise.splunk.com_searchheads_crd.yaml rename to deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml index 6f2d8e9be..b1f7c6e3e 100644 --- a/deploy/crds/enterprise.splunk.com_searchheads_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -1,33 +1,48 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: searchheads.enterprise.splunk.com + name: searchheadclusters.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of search head cluster name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of search heads - name: Instances type: string + - JSONPath: .status.deployerPhase + description: Status of the deployer + name: Deployer + type: string + - JSONPath: .status.replicas + description: Desired number of search head cluster members + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready search head cluster members + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of search head cluster + name: Age + type: date group: enterprise.splunk.com names: - kind: SearchHead - listKind: SearchHeadList - plural: searchheads + kind: SearchHeadCluster + listKind: SearchHeadClusterList + plural: searchheadclusters shortNames: - - search - - sh - singular: searchhead + - shc + singular: searchheadcluster scope: Namespaced subresources: + scale: + labelSelectorPath: 
.status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: - description: SearchHead is the Schema for a Splunk Enterprise standalone search - head or cluster of search heads + description: SearchHeadCluster is the Schema for a Splunk Enterprise search + head cluster properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -42,8 +57,8 @@ spec: metadata: type: object spec: - description: SearchHeadSpec defines the desired state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterSpec defines the desired state of a Splunk + Enterprise search head cluster properties: affinity: description: Kubernetes Affinity rules that control how pods are assigned @@ -631,7 +646,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -641,9 +656,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -721,6 +736,7 @@ spec: replicas: description: Number of search head pods; a search head cluster will be created if > 1 + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -983,7 +999,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -2212,20 +2228,86 @@ spec: type: array type: object status: - description: SearchHeadStatus defines the observed state of a Splunk Enterprise - standalone search head or cluster of search heads + description: SearchHeadClusterStatus defines the observed state of a Splunk + Enterprise search head cluster properties: - instances: - description: current number of search head instances - type: integer + captain: + description: name or label of the search head captain + type: string + captainReady: + description: true if the search head cluster's captain is ready to service + requests + type: boolean + deployerPhase: + description: current phase of the deployer + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + initialized: + description: true if the search head cluster has finished initialization + type: boolean + maintenanceMode: + description: true if the search head cluster is in maintenance mode + type: boolean + members: + description: status of each search head cluster member + items: + description: SearchHeadClusterMemberStatus is used to track the status + of each search head cluster member + properties: + active_historical_search_count: + description: Number of currently running historical searches. + type: integer + active_realtime_search_count: + description: Number of currently running realtime searches. 
+ type: integer + adhoc_searchhead: + description: Flag that indicates if this member can run scheduled + searches. + type: boolean + is_registered: + description: Indicates if this member is registered with the searchhead + cluster captain. + type: boolean + name: + description: Name of the search head cluster member + type: string + status: + description: Indicates the status of the member. + type: string + type: object + type: array + minPeersJoined: + description: true if the minimum number of search head cluster members + have joined + type: boolean phase: description: current phase of the search head cluster enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready search head cluster members + format: int32 + type: integer + replicas: + description: desired number of search head cluster members + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object diff --git a/deploy/crds/enterprise.splunk.com_sparks_crd.yaml b/deploy/crds/enterprise.splunk.com_sparks_crd.yaml index 5e363c170..e5552ebe1 100644 --- a/deploy/crds/enterprise.splunk.com_sparks_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_sparks_crd.yaml @@ -4,14 +4,26 @@ metadata: name: sparks.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase - description: Status of Spark cluster + - JSONPath: .status.phase + description: Status of Spark workers name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of Spark workers - name: Instances type: string + - JSONPath: .status.masterPhase + description: Status of Spark master + name: Master + type: string + - JSONPath: .status.replicas + description: Number of desired Spark workers + name: Desired + type: integer + - JSONPath: 
.status.readyReplicas + description: Current number of ready Spark workers + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of Spark cluster + name: Age + type: date group: enterprise.splunk.com names: kind: Spark @@ -20,6 +32,10 @@ spec: singular: spark scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ -614,7 +630,7 @@ spec: type: object type: object image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -626,6 +642,7 @@ spec: type: string replicas: description: Number of spark worker pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -891,17 +908,38 @@ spec: status: description: SparkStatus defines the observed state of a Spark cluster properties: - instances: - description: current number of spark worker instances - type: integer + masterPhase: + description: current phase of the spark master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string phase: - description: current phase of the spark cluster + description: current phase of the spark workers enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready spark workers + format: int32 + type: integer + replicas: + description: number of desired spark workers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object diff --git 
a/deploy/crds/enterprise.splunk.com_standalones_crd.yaml b/deploy/crds/enterprise.splunk.com_standalones_crd.yaml index afab066d0..e79365f0f 100644 --- a/deploy/crds/enterprise.splunk.com_standalones_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_standalones_crd.yaml @@ -4,14 +4,22 @@ metadata: name: standalones.enterprise.splunk.com spec: additionalPrinterColumns: - - JSONPath: .spec.status.phase + - JSONPath: .status.phase description: Status of standalone instances name: Phase - type: integer - - JSONPath: .spec.status.instances - description: Number of standalone instances - name: Instances type: string + - JSONPath: .status.replicas + description: Number of desired standalone instances + name: Desired + type: integer + - JSONPath: .status.readyReplicas + description: Current number of ready standalone instances + name: Ready + type: integer + - JSONPath: .metadata.creationTimestamp + description: Age of standalone resource + name: Age + type: date group: enterprise.splunk.com names: kind: Standalone @@ -20,6 +28,10 @@ spec: singular: standalone scope: Namespaced subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas status: {} validation: openAPIV3Schema: @@ -627,7 +639,7 @@ spec: volume claims (default=”1Gi”) type: string image: - description: Image to use for Splunk pod containers (overrides SPLUNK_IMAGE + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables) type: string imagePullPolicy: @@ -637,9 +649,9 @@ spec: - Always - IfNotPresent type: string - indexerRef: - description: IndexerRef refers to a Splunk Enterprise indexer cluster - managed by the operator within Kubernetes + indexerClusterRef: + description: IndexerClusterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes properties: apiVersion: description: API version of the referent. 
@@ -716,6 +728,7 @@ spec: type: string replicas: description: Number of standalone pods + format: int32 type: integer resources: description: resource requirements for the pod containers @@ -978,7 +991,7 @@ spec: type: object type: object sparkImage: - description: Image to use for Spark pod containers (overrides SPARK_IMAGE + description: Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) type: string sparkRef: @@ -2210,17 +2223,27 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. properties: - instances: - description: current number of standalone instances - type: integer phase: description: current phase of the standalone instances enum: - - pending - - ready - - scaleup - - scaledown - - updating + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready standalone instances + format: int32 + type: integer + replicas: + description: number of desired standalone instances + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler type: string type: object type: object diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 4e289230e..f9b8e77ce 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -46,7 +46,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "splunk-operator" - - name: SPLUNK_IMAGE - value: "splunk/splunk:8.0" - - name: SPARK_IMAGE + - name: RELATED_IMAGE_SPLUNK_ENTERPRISE + value: "splunk/splunk:edge" + - name: RELATED_IMAGE_SPLUNK_SPARK value: "splunk/spark" diff --git a/docs/ChangeLog.md b/docs/ChangeLog.md index 09f9758e5..3a0589dd1 100644 --- a/docs/ChangeLog.md +++ b/docs/ChangeLog.md @@ -1,6 +1,6 @@ # Splunk Operator for Kubernetes Change Log -## 0.1.0 Alpha (2020-??-??) 
+## 0.1.0 Alpha (2020-03-20) * This release depends upon changes made concurrently in the Splunk Enterprise container images. You must use the latest splunk/splunk:edge @@ -8,12 +8,18 @@ * The API has been updated to v1alpha2, and involves the replacement of the SplunkEnterprise custom resource with 5 new custom resources: - Spark, LicenseMaster, Standalone, SearchHead and Indexer. Please read the - revised [Custom Resources](CustomResources.md) and [Examples](Examples.md) - documentation for details on all the changes. This is a major update and is - not backwards-compatible. You will have to completely remove any older - versions, and any resources managed by the operator, before upgrading to - this release. + Spark, LicenseMaster, Standalone, SearchHeadCluster and IndexerCluster. + Please read the revised [Custom Resources](CustomResources.md) and + [Examples](Examples.md) documentation for details on all the changes. This + is a major update and is not backwards-compatible. You will have to + completely remove any older versions, and any resources managed by the + operator, before upgrading to this release. + +* Scaling, upgrades and other updates are now more actively managed for the + SearchHeadCluster and IndexerCluster resources. This helps protect against + data loss and maximizes availability while changes are being made. You can + now also use the "kubectl scale" command, and Horizontal Pod Autoscalers + with all resources (except LicenseMaster, which always uses a single Pod). * A new serviceTemplate spec parameter has been added for all Splunk Enterprise custom resources. This may be used to define a template the operator uses for @@ -30,7 +36,7 @@ cluster master warnings about using the default value. * Integrated with CircleCI and Coverall for CICD and code coverage, and - added a bunch of unit tests to bring coverage up to 93%. + added a bunch of unit tests to bring coverage up to over 90%. 
## 0.0.6 Alpha (2019-12-12) diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 7278b9d68..613018794 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -10,8 +10,8 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. * [Spark Resource Spec Parameters](#spark-resource-spec-parameters) * [LicenseMaster Resource Spec Parameters](#licensemaster-resource-spec-parameters) * [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) -* [SearchHead Resource Spec Parameters](#searchhead-resource-spec-parameters) -* [Indexer Resource Spec Parameters](#indexer-resource-spec-parameters) +* [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) +* [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) For examples on how to use these custom resources, please see [Configuring Splunk Enterprise Deployments](Examples.md). @@ -70,7 +70,7 @@ configuration parameters: | Key | Type | Description | | --------------------- | ---------- | ---------------------------------------------------------------------------------------------------------- | -| image | string | Container image to use for pod instances (overrides `SPLUNK_IMAGE` or `SPARK_IMAGE` environment variables) | +| image | string | Container image to use for pod instances (overrides `RELATED_IMAGE_SPLUNK_ENTERPRISE` or `RELATED_IMAGE_SPLUNK_SPARK` environment variables) | | imagePullPolicy | string | Sets pull policy for all images (either "Always" or the default: "IfNotPresent") | | schedulerName | string | Name of [Scheduler](https://kubernetes.io/docs/concepts/scheduling/kube-scheduler/) to use for pod placement (defaults to "default-scheduler") | | affinity | [Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core) | [Kubernetes Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) rules 
that control how pods are assigned to particular nodes | @@ -95,25 +95,25 @@ spec: name: splunk-licenses licenseMasterRef: name: example - indexerRef: + indexerClusterRef: name: example ``` The following additional configuration parameters may be used for all Splunk -Enterprise resources, including: `Standalone`, `LicenseMaster`, `SearchHead`, -and `Indexer`: +Enterprise resources, including: `Standalone`, `LicenseMaster`, +`SearchHeadCluster`, and `IndexerCluster`: | Key | Type | Description | | ------------------ | ------- | ----------------------------------------------------------------------------- | | storageClassName | string | Name of [StorageClass](StorageClass.md) to use for persistent volume claims | -| etcStorage | string | Storage capacity to request for Splunk etc volume claims (default="1Gi") | -| varStorage | string | Storage capacity to request for Splunk var volume claims (default="200Gi") | +| etcStorage | string | Storage capacity to request for Splunk etc volume claims (default="10Gi") | +| varStorage | string | Storage capacity to request for Splunk var volume claims (default="100Gi") | | volumes | [[]Volume](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#volume-v1-core) | List of one or more [Kubernetes volumes](https://kubernetes.io/docs/concepts/storage/volumes/). 
These will be mounted in all container pods as as `/mnt/` | | defaults | string | Inline map of [default.yml](https://github.com/splunk/splunk-ansible/blob/develop/docs/advanced/default.yml.spec.md) overrides used to initialize the environment | | defaultsUrl | string | Full path or URL for one or more [default.yml](https://github.com/splunk/splunk-ansible/blob/develop/docs/advanced/default.yml.spec.md) files, separated by commas | | licenseUrl | string | Full path or URL for a Splunk Enterprise license file | | licenseMasterRef | [ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectreference-v1-core) | Reference to a Splunk Operator managed `LicenseMaster` instance (via `name` and optionally `namespace`) to use for licensing | -| indexerRef | [ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectreference-v1-core) | Reference to a Splunk Operator managed `Indexer` instance (via `name` and optionally `namespace`) to use for indexing | +| indexerClusterRef | [ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectreference-v1-core) | Reference to a Splunk Operator managed `IndexerCluster` instance (via `name` and optionally `namespace`) to use for indexing | ## Spark Resource Spec Parameters @@ -175,15 +175,15 @@ the `Standalone` resource provides the following `Spec` configuration parameters | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of standalone replicas (defaults to 1) | -| sparkImage | string | Container image Data Fabric Search (DFS) will use for JDK and Spark libraries (overrides `SPARK_IMAGE` environment variables) | +| sparkImage | string | Container image Data Fabric Search (DFS) will use for JDK and Spark libraries (overrides `RELATED_IMAGE_SPLUNK_SPARK` environment variables) | | sparkRef | 
[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectreference-v1-core) | Reference to a Splunk Operator managed `Spark` instance (via `name` and optionally `namespace`). When defined, Data Fabric Search (DFS) will be enabled and configured to use it. | -## SearchHead Resource Spec Parameters +## SearchHeadCluster Resource Spec Parameters ```yaml apiVersion: enterprise.splunk.com/v1alpha2 -kind: SearchHead +kind: SearchHeadCluster metadata: name: example spec: @@ -195,20 +195,20 @@ spec: In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), -the `SearchHead` resource provides the following `Spec` configuration parameters: +the `SearchHeadCluster` resource provides the following `Spec` configuration parameters: | Key | Type | Description | | ---------- | ------- | ------------------------------------------------------------------------------- | | replicas | integer | The number of search heads cluster members (minimum of 3, which is the default) | -| sparkImage | string | Container image Data Fabric Search (DFS) will use for JDK and Spark libraries (overrides `SPARK_IMAGE` environment variables) | +| sparkImage | string | Container image Data Fabric Search (DFS) will use for JDK and Spark libraries (overrides `RELATED_IMAGE_SPLUNK_SPARK` environment variables) | | sparkRef | [ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectreference-v1-core) | Reference to a Splunk Operator managed `Spark` instance (via `name` and optionally `namespace`). When defined, Data Fabric Search (DFS) will be enabled and configured to use it. 
| -## Indexer Resource Spec Parameters +## IndexerCluster Resource Spec Parameters ```yaml apiVersion: enterprise.splunk.com/v1alpha2 -kind: Indexer +kind: IndexerCluster metadata: name: example spec: @@ -217,7 +217,7 @@ spec: In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), -the `Indexer` resource provides the following `Spec` configuration parameters: +the `IndexerCluster` resource provides the following `Spec` configuration parameters: | Key | Type | Description | | ---------- | ------- | ----------------------------------------------------- | diff --git a/docs/Examples.md b/docs/Examples.md index e783bb23e..d2abf4618 100644 --- a/docs/Examples.md +++ b/docs/Examples.md @@ -37,12 +37,12 @@ metadata: When growing, customers will typically want to first expand by upgrading to an [indexer cluster](https://docs.splunk.com/Documentation/Splunk/latest/Indexer/Aboutindexesandindexers). 
-The Splunk Operator makes creation of an indexer cluster as easy as creating an `Indexer` resource: +The Splunk Operator makes creation of an indexer cluster as easy as creating an `IndexerCluster` resource: ```yaml cat < 1 - Replicas int `json:"replicas"` -} - -// IndexerStatus defines the observed state of a Splunk Enterprise standalone indexer or cluster of indexers -type IndexerStatus struct { - // current phase of the indexer cluster - // +kubebuilder:validation:Enum=pending;ready;scaleup;scaledown;updating - Phase string `json:"phase"` - - // current phase of the cluster master - // +kubebuilder:validation:Enum=pending;ready;scaleup;scaledown;updating - ClusterMasterPhase string `json:"clusterMasterPhase"` - - // current number of indexer instances - Instances int `json:"instances"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Indexer is the Schema for a Splunk Enterprise standalone indexer or cluster of indexers -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=indexers,scope=Namespaced,shortName=idx -// +kubebuilder:printcolumn:name="Phase",type="integer",JSONPath=".spec.status.phase",description="Status of indexer cluster" -// +kubebuilder:printcolumn:name="CM",type="string",JSONPath=".spec.status.clusterMasterPhase",description="Status of cluster master" -// +kubebuilder:printcolumn:name="Instances",type="string",JSONPath=".spec.status.instances",description="Number of indexers" -type Indexer struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec IndexerSpec `json:"spec,omitempty"` - Status IndexerStatus `json:"status,omitempty"` -} - -// GetIdentifier is a convenience function to return unique identifier for the Splunk enterprise deployment -func (cr *Indexer) GetIdentifier() string { - return cr.ObjectMeta.Name -} - -// GetNamespace is a convenience function to return namespace for a Splunk enterprise deployment -func (cr *Indexer) GetNamespace() string 
{ - return cr.ObjectMeta.Namespace -} - -// GetTypeMeta is a convenience function to return a TypeMeta object -func (cr *Indexer) GetTypeMeta() metav1.TypeMeta { - return cr.TypeMeta -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IndexerList contains a list of Indexer -type IndexerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []Indexer `json:"items"` -} - -func init() { - SchemeBuilder.Register(&Indexer{}, &IndexerList{}) -} diff --git a/pkg/apis/enterprise/v1alpha2/indexercluster_types.go b/pkg/apis/enterprise/v1alpha2/indexercluster_types.go new file mode 100644 index 000000000..514120f25 --- /dev/null +++ b/pkg/apis/enterprise/v1alpha2/indexercluster_types.go @@ -0,0 +1,136 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// default all fields to being optional +// +kubebuilder:validation:Optional + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file +// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html +// see also https://book.kubebuilder.io/reference/markers/crd.html + +// IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster +type IndexerClusterSpec struct { + CommonSplunkSpec `json:",inline"` + + // Number of search head pods; a search head cluster will be created if > 1 + Replicas int32 `json:"replicas"` +} + +// IndexerClusterMemberStatus is used to track the status of each indexer cluster peer. +type IndexerClusterMemberStatus struct { + // Unique identifier or GUID for the peer + ID string `json:"guid"` + + // Name of the indexer cluster peer + Name string `json:"name"` + + // Status of the indexer cluster peer + Status string `json:"status"` + + // The ID of the configuration bundle currently being used by the master. + ActiveBundleID string `json:"active_bundle_id"` + + // Count of the number of buckets on this peer, across all indexes. + BucketCount int64 `json:"bucket_count"` + + // Flag indicating if this peer belongs to the current committed generation and is searchable. + Searchable bool `json:"is_searchable"` +} + +// IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster +type IndexerClusterStatus struct { + // current phase of the indexer cluster + Phase ResourcePhase `json:"phase"` + + // current phase of the cluster master + ClusterMasterPhase ResourcePhase `json:"clusterMasterPhase"` + + // desired number of indexer peers + Replicas int32 `json:"replicas"` + + // current number of ready indexer peers + ReadyReplicas int32 `json:"readyReplicas"` + + // selector for pods, used by HorizontalPodAutoscaler + Selector string `json:"selector"` + + // Indicates if the cluster is initialized. 
+ Initialized bool `json:"initialized_flag"` + + // Indicates if the cluster is ready for indexing. + IndexingReady bool `json:"indexing_ready_flag"` + + // Indicates whether the master is ready to begin servicing, based on whether it is initialized. + ServiceReady bool `json:"service_ready_flag"` + + // Indicates if the cluster is in maintenance mode. + MaintenanceMode bool `json:"maintenance_mode"` + + // status of each indexer cluster peer + Peers []IndexerClusterMemberStatus `json:"peers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IndexerCluster is the Schema for a Splunk Enterprise indexer cluster +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=indexerclusters,scope=Namespaced,shortName=idc;idxc +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of indexer cluster" +// +kubebuilder:printcolumn:name="Master",type="string",JSONPath=".status.clusterMasterPhase",description="Status of cluster master" +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of indexer peers" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready indexer peers" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of indexer cluster" +type IndexerCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec IndexerClusterSpec `json:"spec,omitempty"` + Status IndexerClusterStatus `json:"status,omitempty"` +} + +// GetIdentifier is a convenience function to return unique identifier for the Splunk enterprise deployment +func (cr *IndexerCluster) GetIdentifier() string { + return cr.ObjectMeta.Name +} + +// GetNamespace is a convenience 
function to return namespace for a Splunk enterprise deployment +func (cr *IndexerCluster) GetNamespace() string { + return cr.ObjectMeta.Namespace +} + +// GetTypeMeta is a convenience function to return a TypeMeta object +func (cr *IndexerCluster) GetTypeMeta() metav1.TypeMeta { + return cr.TypeMeta +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IndexerClusterList contains a list of IndexerCluster +type IndexerClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []IndexerCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&IndexerCluster{}, &IndexerClusterList{}) +} diff --git a/pkg/apis/enterprise/v1alpha2/licensemaster_types.go b/pkg/apis/enterprise/v1alpha2/licensemaster_types.go index 3b2e1680d..5fe78db16 100644 --- a/pkg/apis/enterprise/v1alpha2/licensemaster_types.go +++ b/pkg/apis/enterprise/v1alpha2/licensemaster_types.go @@ -34,8 +34,7 @@ type LicenseMasterSpec struct { // LicenseMasterStatus defines the observed state of a Splunk Enterprise license master. type LicenseMasterStatus struct { // current phase of the license master - // +kubebuilder:validation:Enum=pending;ready;updating - Phase string `json:"phase"` + Phase ResourcePhase `json:"phase"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -43,7 +42,8 @@ type LicenseMasterStatus struct { // LicenseMaster is the Schema for a Splunk Enterprise license master. 
// +kubebuilder:subresource:status // +kubebuilder:resource:path=licensemasters,scope=Namespaced,shortName=lm -// +kubebuilder:printcolumn:name="Phase",type="integer",JSONPath=".spec.status.phase",description="Status of license master" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of license master" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of license master" type LicenseMaster struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/pkg/apis/enterprise/v1alpha2/searchhead_types.go b/pkg/apis/enterprise/v1alpha2/searchhead_types.go deleted file mode 100644 index 024e5e965..000000000 --- a/pkg/apis/enterprise/v1alpha2/searchhead_types.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v1alpha2 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// default all fields to being optional -// +kubebuilder:validation:Optional - -// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
-// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file -// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html -// see also https://book.kubebuilder.io/reference/markers/crd.html - -// SearchHeadSpec defines the desired state of a Splunk Enterprise standalone search head or cluster of search heads -type SearchHeadSpec struct { - CommonSplunkSpec `json:",inline"` - - // Number of search head pods; a search head cluster will be created if > 1 - Replicas int `json:"replicas"` - - // SparkRef refers to a Spark cluster managed by the operator within Kubernetes - // When defined, Data Fabric Search (DFS) will be enabled and configured to use the Spark cluster. - SparkRef corev1.ObjectReference `json:"sparkRef"` - - // Image to use for Spark pod containers (overrides SPARK_IMAGE environment variables) - SparkImage string `json:"sparkImage"` -} - -// SearchHeadStatus defines the observed state of a Splunk Enterprise standalone search head or cluster of search heads -type SearchHeadStatus struct { - // current phase of the search head cluster - // +kubebuilder:validation:Enum=pending;ready;scaleup;scaledown;updating - Phase string `json:"phase"` - - // current number of search head instances - Instances int `json:"instances"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SearchHead is the Schema for a Splunk Enterprise standalone search head or cluster of search heads -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=searchheads,scope=Namespaced,shortName=search;sh -// +kubebuilder:printcolumn:name="Phase",type="integer",JSONPath=".spec.status.phase",description="Status of search head cluster" -// +kubebuilder:printcolumn:name="Instances",type="string",JSONPath=".spec.status.instances",description="Number of search heads" -type SearchHead struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta 
`json:"metadata,omitempty"` - - Spec SearchHeadSpec `json:"spec,omitempty"` - Status SearchHeadStatus `json:"status,omitempty"` -} - -// GetIdentifier is a convenience function to return unique identifier for the Splunk enterprise deployment -func (cr *SearchHead) GetIdentifier() string { - return cr.ObjectMeta.Name -} - -// GetNamespace is a convenience function to return namespace for a Splunk enterprise deployment -func (cr *SearchHead) GetNamespace() string { - return cr.ObjectMeta.Namespace -} - -// GetTypeMeta is a convenience function to return a TypeMeta object -func (cr *SearchHead) GetTypeMeta() metav1.TypeMeta { - return cr.TypeMeta -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SearchHeadList contains a list of SearcHead -type SearchHeadList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []SearchHead `json:"items"` -} - -func init() { - SchemeBuilder.Register(&SearchHead{}, &SearchHeadList{}) -} diff --git a/pkg/apis/enterprise/v1alpha2/searchheadcluster_types.go b/pkg/apis/enterprise/v1alpha2/searchheadcluster_types.go new file mode 100644 index 000000000..8e71a150c --- /dev/null +++ b/pkg/apis/enterprise/v1alpha2/searchheadcluster_types.go @@ -0,0 +1,147 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v1alpha2 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// default all fields to being optional +// +kubebuilder:validation:Optional + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. +// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file +// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html +// see also https://book.kubebuilder.io/reference/markers/crd.html + +// SearchHeadClusterSpec defines the desired state of a Splunk Enterprise search head cluster +type SearchHeadClusterSpec struct { + CommonSplunkSpec `json:",inline"` + + // Number of search head pods; a search head cluster will be created if > 1 + Replicas int32 `json:"replicas"` + + // SparkRef refers to a Spark cluster managed by the operator within Kubernetes + // When defined, Data Fabric Search (DFS) will be enabled and configured to use the Spark cluster. + SparkRef corev1.ObjectReference `json:"sparkRef"` + + // Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) + SparkImage string `json:"sparkImage"` +} + +// SearchHeadClusterMemberStatus is used to track the status of each search head cluster member +type SearchHeadClusterMemberStatus struct { + // Name of the search head cluster member + Name string `json:"name"` + + // Indicates the status of the member. + Status string `json:"status"` + + // Flag that indicates if this member can run scheduled searches. + Adhoc bool `json:"adhoc_searchhead"` + + // Indicates if this member is registered with the searchhead cluster captain. + Registered bool `json:"is_registered"` + + // Number of currently running historical searches. + ActiveHistoricalSearchCount int `json:"active_historical_search_count"` + + // Number of currently running realtime searches. 
+ ActiveRealtimeSearchCount int `json:"active_realtime_search_count"` +} + +// SearchHeadClusterStatus defines the observed state of a Splunk Enterprise search head cluster +type SearchHeadClusterStatus struct { + // current phase of the search head cluster + Phase ResourcePhase `json:"phase"` + + // current phase of the deployer + DeployerPhase ResourcePhase `json:"deployerPhase"` + + // desired number of search head cluster members + Replicas int32 `json:"replicas"` + + // current number of ready search head cluster members + ReadyReplicas int32 `json:"readyReplicas"` + + // selector for pods, used by HorizontalPodAutoscaler + Selector string `json:"selector"` + + // name or label of the search head captain + Captain string `json:"captain"` + + // true if the search head cluster's captain is ready to service requests + CaptainReady bool `json:"captainReady"` + + // true if the search head cluster has finished initialization + Initialized bool `json:"initialized"` + + // true if the minimum number of search head cluster members have joined + MinPeersJoined bool `json:"minPeersJoined"` + + // true if the search head cluster is in maintenance mode + MaintenanceMode bool `json:"maintenanceMode"` + + // status of each search head cluster member + Members []SearchHeadClusterMemberStatus `json:"members"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SearchHeadCluster is the Schema for a Splunk Enterprise search head cluster +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=searchheadclusters,scope=Namespaced,shortName=shc +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of search head cluster" +// +kubebuilder:printcolumn:name="Deployer",type="string",JSONPath=".status.deployerPhase",description="Status of the deployer" +// 
+kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of search head cluster members" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready search head cluster members" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of search head cluster" +type SearchHeadCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SearchHeadClusterSpec `json:"spec,omitempty"` + Status SearchHeadClusterStatus `json:"status,omitempty"` +} + +// GetIdentifier is a convenience function to return unique identifier for the Splunk enterprise deployment +func (cr *SearchHeadCluster) GetIdentifier() string { + return cr.ObjectMeta.Name +} + +// GetNamespace is a convenience function to return namespace for a Splunk enterprise deployment +func (cr *SearchHeadCluster) GetNamespace() string { + return cr.ObjectMeta.Namespace +} + +// GetTypeMeta is a convenience function to return a TypeMeta object +func (cr *SearchHeadCluster) GetTypeMeta() metav1.TypeMeta { + return cr.TypeMeta +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SearchHeadClusterList contains a list of SearcHead +type SearchHeadClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SearchHeadCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&SearchHeadCluster{}, &SearchHeadClusterList{}) +} diff --git a/pkg/apis/enterprise/v1alpha2/spark_types.go b/pkg/apis/enterprise/v1alpha2/spark_types.go index e7667689d..abcf92b6c 100644 --- a/pkg/apis/enterprise/v1alpha2/spark_types.go +++ b/pkg/apis/enterprise/v1alpha2/spark_types.go @@ -31,26 +31,38 @@ type SparkSpec struct { CommonSpec `json:",inline"` // Number of spark worker pods - Replicas int `json:"replicas"` + Replicas int32 
`json:"replicas"` } // SparkStatus defines the observed state of a Spark cluster type SparkStatus struct { - // current phase of the spark cluster - // +kubebuilder:validation:Enum=pending;ready;scaleup;scaledown;updating - Phase string `json:"phase"` + // current phase of the spark workers + Phase ResourcePhase `json:"phase"` - // current number of spark worker instances - Instances int `json:"instances"` + // current phase of the spark master + MasterPhase ResourcePhase `json:"masterPhase"` + + // number of desired spark workers + Replicas int32 `json:"replicas"` + + // current number of ready spark workers + ReadyReplicas int32 `json:"readyReplicas"` + + // selector for pods, used by HorizontalPodAutoscaler + Selector string `json:"selector"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Spark is the Schema for a Spark cluster // +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:resource:path=sparks,scope=Namespaced -// +kubebuilder:printcolumn:name="Phase",type="integer",JSONPath=".spec.status.phase",description="Status of Spark cluster" -// +kubebuilder:printcolumn:name="Instances",type="string",JSONPath=".spec.status.instances",description="Number of Spark workers" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of Spark workers" +// +kubebuilder:printcolumn:name="Master",type="string",JSONPath=".status.masterPhase",description="Status of Spark master" +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Number of desired Spark workers" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready Spark workers" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of Spark cluster" type Spark struct { 
metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/pkg/apis/enterprise/v1alpha2/standalone_types.go b/pkg/apis/enterprise/v1alpha2/standalone_types.go index de9be7523..5b2c4b4dc 100644 --- a/pkg/apis/enterprise/v1alpha2/standalone_types.go +++ b/pkg/apis/enterprise/v1alpha2/standalone_types.go @@ -32,33 +32,41 @@ type StandaloneSpec struct { CommonSplunkSpec `json:",inline"` // Number of standalone pods - Replicas int `json:"replicas"` + Replicas int32 `json:"replicas"` // SparkRef refers to a Spark cluster managed by the operator within Kubernetes // When defined, Data Fabric Search (DFS) will be enabled and configured to use the Spark cluster. SparkRef corev1.ObjectReference `json:"sparkRef"` - // Image to use for Spark pod containers (overrides SPARK_IMAGE environment variables) + // Image to use for Spark pod containers (overrides RELATED_IMAGE_SPLUNK_SPARK environment variables) SparkImage string `json:"sparkImage"` } // StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. type StandaloneStatus struct { // current phase of the standalone instances - // +kubebuilder:validation:Enum=pending;ready;scaleup;scaledown;updating - Phase string `json:"phase"` + Phase ResourcePhase `json:"phase"` - // current number of standalone instances - Instances int `json:"instances"` + // number of desired standalone instances + Replicas int32 `json:"replicas"` + + // current number of ready standalone instances + ReadyReplicas int32 `json:"readyReplicas"` + + // selector for pods, used by HorizontalPodAutoscaler + Selector string `json:"selector"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Standalone is the Schema for a Splunk Enterprise standalone instances. 
// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:resource:path=standalones,scope=Namespaced -// +kubebuilder:printcolumn:name="Phase",type="integer",JSONPath=".spec.status.phase",description="Status of standalone instances" -// +kubebuilder:printcolumn:name="Instances",type="string",JSONPath=".spec.status.instances",description="Number of standalone instances" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of standalone instances" +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Number of desired standalone instances" +// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready standalone instances" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of standalone resource" type Standalone struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` diff --git a/pkg/apis/enterprise/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/enterprise/v1alpha2/zz_generated.deepcopy.go index 3a547fba6..5f89f9b5f 100644 --- a/pkg/apis/enterprise/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/enterprise/v1alpha2/zz_generated.deepcopy.go @@ -40,7 +40,7 @@ func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { } } out.LicenseMasterRef = in.LicenseMasterRef - out.IndexerRef = in.IndexerRef + out.IndexerClusterRef = in.IndexerClusterRef return } @@ -55,27 +55,27 @@ func (in *CommonSplunkSpec) DeepCopy() *CommonSplunkSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Indexer) DeepCopyInto(out *Indexer) { +func (in *IndexerCluster) DeepCopyInto(out *IndexerCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Indexer. -func (in *Indexer) DeepCopy() *Indexer { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerCluster. +func (in *IndexerCluster) DeepCopy() *IndexerCluster { if in == nil { return nil } - out := new(Indexer) + out := new(IndexerCluster) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Indexer) DeepCopyObject() runtime.Object { +func (in *IndexerCluster) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -83,13 +83,13 @@ func (in *Indexer) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerList) DeepCopyInto(out *IndexerList) { +func (in *IndexerClusterList) DeepCopyInto(out *IndexerClusterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Indexer, len(*in)) + *out = make([]IndexerCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -97,18 +97,18 @@ func (in *IndexerList) DeepCopyInto(out *IndexerList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerList. -func (in *IndexerList) DeepCopy() *IndexerList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterList. 
+func (in *IndexerClusterList) DeepCopy() *IndexerClusterList { if in == nil { return nil } - out := new(IndexerList) + out := new(IndexerClusterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IndexerList) DeepCopyObject() runtime.Object { +func (in *IndexerClusterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -116,34 +116,55 @@ func (in *IndexerList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerSpec) DeepCopyInto(out *IndexerSpec) { +func (in *IndexerClusterMemberStatus) DeepCopyInto(out *IndexerClusterMemberStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterMemberStatus. +func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { + if in == nil { + return nil + } + out := new(IndexerClusterMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerSpec. -func (in *IndexerSpec) DeepCopy() *IndexerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. +func (in *IndexerClusterSpec) DeepCopy() *IndexerClusterSpec { if in == nil { return nil } - out := new(IndexerSpec) + out := new(IndexerClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IndexerStatus) DeepCopyInto(out *IndexerStatus) { +func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = *in + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]IndexerClusterMemberStatus, len(*in)) + copy(*out, *in) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerStatus. -func (in *IndexerStatus) DeepCopy() *IndexerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. +func (in *IndexerClusterStatus) DeepCopy() *IndexerClusterStatus { if in == nil { return nil } - out := new(IndexerStatus) + out := new(IndexerClusterStatus) in.DeepCopyInto(out) return out } @@ -243,27 +264,27 @@ func (in *LicenseMasterStatus) DeepCopy() *LicenseMasterStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHead) DeepCopyInto(out *SearchHead) { +func (in *SearchHeadCluster) DeepCopyInto(out *SearchHeadCluster) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHead. -func (in *SearchHead) DeepCopy() *SearchHead { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadCluster. +func (in *SearchHeadCluster) DeepCopy() *SearchHeadCluster { if in == nil { return nil } - out := new(SearchHead) + out := new(SearchHeadCluster) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *SearchHead) DeepCopyObject() runtime.Object { +func (in *SearchHeadCluster) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -271,13 +292,13 @@ func (in *SearchHead) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadList) DeepCopyInto(out *SearchHeadList) { +func (in *SearchHeadClusterList) DeepCopyInto(out *SearchHeadClusterList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]SearchHead, len(*in)) + *out = make([]SearchHeadCluster, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -285,18 +306,18 @@ func (in *SearchHeadList) DeepCopyInto(out *SearchHeadList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadList. -func (in *SearchHeadList) DeepCopy() *SearchHeadList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterList. +func (in *SearchHeadClusterList) DeepCopy() *SearchHeadClusterList { if in == nil { return nil } - out := new(SearchHeadList) + out := new(SearchHeadClusterList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SearchHeadList) DeepCopyObject() runtime.Object { +func (in *SearchHeadClusterList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -304,35 +325,56 @@ func (in *SearchHeadList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SearchHeadSpec) DeepCopyInto(out *SearchHeadSpec) { +func (in *SearchHeadClusterMemberStatus) DeepCopyInto(out *SearchHeadClusterMemberStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterMemberStatus. +func (in *SearchHeadClusterMemberStatus) DeepCopy() *SearchHeadClusterMemberStatus { + if in == nil { + return nil + } + out := new(SearchHeadClusterMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) out.SparkRef = in.SparkRef return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadSpec. -func (in *SearchHeadSpec) DeepCopy() *SearchHeadSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. +func (in *SearchHeadClusterSpec) DeepCopy() *SearchHeadClusterSpec { if in == nil { return nil } - out := new(SearchHeadSpec) + out := new(SearchHeadClusterSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadStatus) DeepCopyInto(out *SearchHeadStatus) { +func (in *SearchHeadClusterStatus) DeepCopyInto(out *SearchHeadClusterStatus) { *out = *in + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]SearchHeadClusterMemberStatus, len(*in)) + copy(*out, *in) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadStatus. -func (in *SearchHeadStatus) DeepCopy() *SearchHeadStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterStatus. 
+func (in *SearchHeadClusterStatus) DeepCopy() *SearchHeadClusterStatus { if in == nil { return nil } - out := new(SearchHeadStatus) + out := new(SearchHeadClusterStatus) in.DeepCopyInto(out) return out } diff --git a/pkg/controller/add_indexer.go b/pkg/controller/add_indexercluster.go similarity index 51% rename from pkg/controller/add_indexer.go rename to pkg/controller/add_indexercluster.go index 968130198..ba7589760 100644 --- a/pkg/controller/add_indexer.go +++ b/pkg/controller/add_indexercluster.go @@ -1,10 +1,10 @@ package controller import ( - "github.com/splunk/splunk-operator/pkg/controller/indexer" + "github.com/splunk/splunk-operator/pkg/controller/indexercluster" ) func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. - AddToManagerFuncs = append(AddToManagerFuncs, indexer.Add) + AddToManagerFuncs = append(AddToManagerFuncs, indexercluster.Add) } diff --git a/pkg/controller/add_searchhead.go b/pkg/controller/add_searchheadcluster.go similarity index 50% rename from pkg/controller/add_searchhead.go rename to pkg/controller/add_searchheadcluster.go index ca307faa3..e00c82356 100644 --- a/pkg/controller/add_searchhead.go +++ b/pkg/controller/add_searchheadcluster.go @@ -1,10 +1,10 @@ package controller import ( - "github.com/splunk/splunk-operator/pkg/controller/searchhead" + "github.com/splunk/splunk-operator/pkg/controller/searchheadcluster" ) func init() { // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
- AddToManagerFuncs = append(AddToManagerFuncs, searchhead.Add) + AddToManagerFuncs = append(AddToManagerFuncs, searchheadcluster.Add) } diff --git a/pkg/controller/indexer/indexer_controller.go b/pkg/controller/indexercluster/indexercluster_controller.go similarity index 69% rename from pkg/controller/indexer/indexer_controller.go rename to pkg/controller/indexercluster/indexercluster_controller.go index 2aa3ddc79..f0b66ba6a 100644 --- a/pkg/controller/indexer/indexer_controller.go +++ b/pkg/controller/indexercluster/indexercluster_controller.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package indexer +package indexercluster import ( "context" @@ -39,7 +39,7 @@ var log = logf.Log.WithName("controller_indexer") * business logic. Delete these comments after modifying this file.* */ -// Add creates a new Indexer Controller and adds it to the Manager. The Manager will set fields on the Controller +// Add creates a new IndexerCluster Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
func Add(mgr manager.Manager) error { return add(mgr, newReconciler(mgr)) @@ -47,7 +47,7 @@ func Add(mgr manager.Manager) error { // newReconciler returns a new reconcile.Reconciler func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileIndexer{client: mgr.GetClient(), scheme: mgr.GetScheme()} + return &ReconcileIndexerCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()} } // add adds a new Controller to mgr with r as the reconcile.Reconciler @@ -58,16 +58,16 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } - // Watch for changes to primary resource Indexer - err = c.Watch(&source.Kind{Type: &enterprisev1.Indexer{}}, &handler.EnqueueRequestForObject{}) + // Watch for changes to primary resource IndexerCluster + err = c.Watch(&source.Kind{Type: &enterprisev1.IndexerCluster{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err } - // Watch for changes to secondary resource StatefulSets and requeue the owner Indexer + // Watch for changes to secondary resource StatefulSets and requeue the owner IndexerCluster err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{ IsController: true, - OwnerType: &enterprisev1.Indexer{}, + OwnerType: &enterprisev1.IndexerCluster{}, }) if err != nil { return err @@ -76,30 +76,30 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return nil } -// blank assignment to verify that ReconcileIndexer implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReconcileIndexer{} +// blank assignment to verify that ReconcileIndexerCluster implements reconcile.Reconciler +var _ reconcile.Reconciler = &ReconcileIndexerCluster{} -// ReconcileIndexer reconciles a Indexer object -type ReconcileIndexer struct { +// ReconcileIndexerCluster reconciles a IndexerCluster object +type ReconcileIndexerCluster struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and 
writes to the apiserver client client.Client scheme *runtime.Scheme } -// Reconcile reads that state of the cluster for a Indexer object and makes changes based on the state read -// and what is in the Indexer.Spec +// Reconcile reads that state of the cluster for a IndexerCluster object and makes changes based on the state read +// and what is in the IndexerCluster.Spec // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates // a Pod as an example // Note: // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileIndexer) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileIndexerCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - reqLogger.Info("Reconciling Indexer") + reqLogger.Info("Reconciling IndexerCluster") - // Fetch the Indexer instance - instance := &enterprisev1.Indexer{} + // Fetch the IndexerCluster instance + instance := &enterprisev1.IndexerCluster{} err := r.client.Get(context.TODO(), request.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { @@ -113,13 +113,18 @@ func (r *ReconcileIndexer) Reconcile(request reconcile.Request) (reconcile.Resul } instance.TypeMeta.APIVersion = "enterprise.splunk.com/v1alpha2" - instance.TypeMeta.Kind = "Indexer" + instance.TypeMeta.Kind = "IndexerCluster" - err = splunkreconcile.ReconcileIndexer(r.client, instance) + result, err := splunkreconcile.ApplyIndexerCluster(r.client, instance) if err != nil { - return reconcile.Result{}, err + reqLogger.Error(err, "IndexerCluster reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil + } + if result.Requeue { + reqLogger.Info("IndexerCluster reconciliation requeued", 
"RequeueAfter", result.RequeueAfter) + return result, nil } - reqLogger.Info("Indexer reconciliation complete") + reqLogger.Info("IndexerCluster reconciliation complete") return reconcile.Result{}, nil } diff --git a/pkg/controller/licensemaster/licensemaster_controller.go b/pkg/controller/licensemaster/licensemaster_controller.go index 582a7b820..9b640cd45 100644 --- a/pkg/controller/licensemaster/licensemaster_controller.go +++ b/pkg/controller/licensemaster/licensemaster_controller.go @@ -115,9 +115,14 @@ func (r *ReconcileLicenseMaster) Reconcile(request reconcile.Request) (reconcile instance.TypeMeta.APIVersion = "enterprise.splunk.com/v1alpha2" instance.TypeMeta.Kind = "LicenseMaster" - err = splunkreconcile.ReconcileLicenseMaster(r.client, instance) + result, err := splunkreconcile.ApplyLicenseMaster(r.client, instance) if err != nil { - return reconcile.Result{}, err + reqLogger.Error(err, "LicenseMaster reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil + } + if result.Requeue { + reqLogger.Info("LicenseMaster reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil } reqLogger.Info("LicenseMaster reconciliation complete") diff --git a/pkg/controller/searchhead/searchhead_controller.go b/pkg/controller/searchheadcluster/searchheadcluster_controller.go similarity index 68% rename from pkg/controller/searchhead/searchhead_controller.go rename to pkg/controller/searchheadcluster/searchheadcluster_controller.go index 9b68482a8..09f0738ff 100644 --- a/pkg/controller/searchhead/searchhead_controller.go +++ b/pkg/controller/searchheadcluster/searchheadcluster_controller.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package searchhead +package searchheadcluster import ( "context" @@ -39,7 +39,7 @@ var log = logf.Log.WithName("controller_searchhead") * business logic. 
Delete these comments after modifying this file.* */ -// Add creates a new SearchHead Controller and adds it to the Manager. The Manager will set fields on the Controller +// Add creates a new SearchHeadCluster Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager) error { return add(mgr, newReconciler(mgr)) @@ -47,7 +47,7 @@ func Add(mgr manager.Manager) error { // newReconciler returns a new reconcile.Reconciler func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileSearchHead{client: mgr.GetClient(), scheme: mgr.GetScheme()} + return &ReconcileSearchHeadCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()} } // add adds a new Controller to mgr with r as the reconcile.Reconciler @@ -58,16 +58,16 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } - // Watch for changes to primary resource SearchHead - err = c.Watch(&source.Kind{Type: &enterprisev1.SearchHead{}}, &handler.EnqueueRequestForObject{}) + // Watch for changes to primary resource SearchHeadCluster + err = c.Watch(&source.Kind{Type: &enterprisev1.SearchHeadCluster{}}, &handler.EnqueueRequestForObject{}) if err != nil { return err } - // Watch for changes to secondary resource StatefulSets and requeue the owner SearchHead + // Watch for changes to secondary resource StatefulSets and requeue the owner SearchHeadCluster err = c.Watch(&source.Kind{Type: &appsv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{ IsController: true, - OwnerType: &enterprisev1.SearchHead{}, + OwnerType: &enterprisev1.SearchHeadCluster{}, }) if err != nil { return err @@ -76,30 +76,30 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return nil } -// blank assignment to verify that ReconcileSearchHead implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReconcileSearchHead{} +// blank assignment to verify that ReconcileSearchHeadCluster implements 
reconcile.Reconciler +var _ reconcile.Reconciler = &ReconcileSearchHeadCluster{} -// ReconcileSearchHead reconciles a SearchHead object -type ReconcileSearchHead struct { +// ReconcileSearchHeadCluster reconciles a SearchHeadCluster object +type ReconcileSearchHeadCluster struct { // This client, initialized using mgr.Client() above, is a split client // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme } -// Reconcile reads that state of the cluster for a SearchHead object and makes changes based on the state read -// and what is in the SearchHead.Spec +// Reconcile reads that state of the cluster for a SearchHeadCluster object and makes changes based on the state read +// and what is in the SearchHeadCluster.Spec // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates // a Pod as an example // Note: // The Controller will requeue the Request to be processed again if the returned error is non-nil or // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
-func (r *ReconcileSearchHead) Reconcile(request reconcile.Request) (reconcile.Result, error) { +func (r *ReconcileSearchHeadCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - reqLogger.Info("Reconciling SearchHead") + reqLogger.Info("Reconciling SearchHeadCluster") - // Fetch the SearchHead instance - instance := &enterprisev1.SearchHead{} + // Fetch the SearchHeadCluster instance + instance := &enterprisev1.SearchHeadCluster{} err := r.client.Get(context.TODO(), request.NamespacedName, instance) if err != nil { if errors.IsNotFound(err) { @@ -113,13 +113,18 @@ func (r *ReconcileSearchHead) Reconcile(request reconcile.Request) (reconcile.Re } instance.TypeMeta.APIVersion = "enterprise.splunk.com/v1alpha2" - instance.TypeMeta.Kind = "SearchHead" + instance.TypeMeta.Kind = "SearchHeadCluster" - err = splunkreconcile.ReconcileSearchHead(r.client, instance) + result, err := splunkreconcile.ApplySearchHeadCluster(r.client, instance) if err != nil { - return reconcile.Result{}, err + reqLogger.Error(err, "SearchHeadCluster reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil + } + if result.Requeue { + reqLogger.Info("SearchHeadCluster reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil } - reqLogger.Info("SearchHead reconciliation complete") + reqLogger.Info("SearchHeadCluster reconciliation complete") return reconcile.Result{}, nil } diff --git a/pkg/controller/spark/spark_controller.go b/pkg/controller/spark/spark_controller.go index ebf57b700..daecd624e 100644 --- a/pkg/controller/spark/spark_controller.go +++ b/pkg/controller/spark/spark_controller.go @@ -115,9 +115,14 @@ func (r *ReconcileSpark) Reconcile(request reconcile.Request) (reconcile.Result, instance.TypeMeta.APIVersion = "enterprise.splunk.com/v1alpha2" instance.TypeMeta.Kind = "Spark" - err = 
splunkreconcile.ReconcileSpark(r.client, instance) + result, err := splunkreconcile.ApplySpark(r.client, instance) if err != nil { - return reconcile.Result{}, err + reqLogger.Error(err, "Spark reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil + } + if result.Requeue { + reqLogger.Info("Spark reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil } reqLogger.Info("Spark reconciliation complete") diff --git a/pkg/controller/standalone/standalone_controller.go b/pkg/controller/standalone/standalone_controller.go index 2792c5cf3..f5bd11f86 100644 --- a/pkg/controller/standalone/standalone_controller.go +++ b/pkg/controller/standalone/standalone_controller.go @@ -115,9 +115,14 @@ func (r *ReconcileStandalone) Reconcile(request reconcile.Request) (reconcile.Re instance.TypeMeta.APIVersion = "enterprise.splunk.com/v1alpha2" instance.TypeMeta.Kind = "Standalone" - err = splunkreconcile.ReconcileStandalone(r.client, instance) + result, err := splunkreconcile.ApplyStandalone(r.client, instance) if err != nil { - return reconcile.Result{}, err + reqLogger.Error(err, "Standalone reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil + } + if result.Requeue { + reqLogger.Info("Standalone reconciliation requeued", "RequeueAfter", result.RequeueAfter) + return result, nil } reqLogger.Info("Standalone reconciliation complete") diff --git a/pkg/splunk/client/doc.go b/pkg/splunk/client/doc.go new file mode 100644 index 000000000..2ae81ce56 --- /dev/null +++ b/pkg/splunk/client/doc.go @@ -0,0 +1,19 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides a simple client for the Splunk Enterprise REST API. +This package has no dependencies outside of the standard go library. +*/ +package client diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go new file mode 100644 index 000000000..f00a77f97 --- /dev/null +++ b/pkg/splunk/client/enterprise.go @@ -0,0 +1,610 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "time" +) + +// SplunkHTTPClient defines the interface used by SplunkClient. +// It is used to mock alternative implementations used for testing. +type SplunkHTTPClient interface { + Do(*http.Request) (*http.Response, error) +} + +// SplunkClient is a simple object used to send HTTP REST API requests +type SplunkClient struct { + // https endpoint for management interface (e.g.
"https://server:8089") + ManagementURI string + + // username for authentication + Username string + + // password for authentication + Password string + + // HTTP client used to process requests + Client SplunkHTTPClient +} + +// NewSplunkClient returns a new SplunkClient object initialized with a username and password. +func NewSplunkClient(managementURI, username, password string) *SplunkClient { + return &SplunkClient{ + ManagementURI: managementURI, + Username: username, + Password: password, + Client: &http.Client{ + Timeout: 5 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // don't verify ssl certs + }, + }, + } +} + +// Do processes a Splunk REST API request and unmarshals response into obj, if not nil. +func (c *SplunkClient) Do(request *http.Request, expectedStatus int, obj interface{}) error { + // send HTTP response and check status + request.SetBasicAuth(c.Username, c.Password) + response, err := c.Client.Do(request) + if err != nil { + return err + } + if response.StatusCode != expectedStatus { + return fmt.Errorf("Response code=%d from %s; want %d", response.StatusCode, request.URL, expectedStatus) + } + if obj == nil { + return nil + } + + // unmarshall response if obj != nil + data, _ := ioutil.ReadAll(response.Body) + if len(data) == 0 { + return fmt.Errorf("Received empty response body from %s", request.URL) + } + return json.Unmarshal(data, obj) +} + +// Get sends a REST API request and unmarshals response into obj, if not nil. +func (c *SplunkClient) Get(path string, obj interface{}) error { + endpoint := fmt.Sprintf("%s%s?count=0&output_mode=json", c.ManagementURI, path) + request, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return err + } + return c.Do(request, 200, obj) +} + +// SearchHeadCaptainInfo represents the status of the search head cluster. 
+// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#shcluster.2Fcaptain.2Finfo +type SearchHeadCaptainInfo struct { + // Id of this SH cluster. This is used as the unique identifier for the Search Head Cluster in bundle replication and acceleration summary management. + Identifier string `json:"id"` + + // Time when the current captain was elected + ElectedCaptain int64 `json:"elected_captain"` + + // Indicates if the searchhead cluster is initialized. + Initialized bool `json:"initialized_flag"` + + // The name for the captain. Displayed on the Splunk Web manager page. + Label string `json:"label"` + + // Indicates if the cluster is in maintenance mode. + MaintenanceMode bool `json:"maintenance_mode"` + + // Flag to indicate if more than replication_factor peers have joined the cluster. + MinPeersJoined bool `json:"min_peers_joined_flag"` + + // URI of the current captain. + PeerSchemeHostPort string `json:"peer_scheme_host_port"` + + // Indicates whether the captain is restarting the members in a searchhead cluster. + RollingRestart bool `json:"rolling_restart_flag"` + + // Indicates whether the captain is ready to begin servicing, based on whether it is initialized. + ServiceReady bool `json:"service_ready_flag"` + + // Timestamp corresponding to the creation of the captain. + StartTime int64 `json:"start_time"` +} + +// GetSearchHeadCaptainInfo queries the captain for info about the search head cluster. +// You can use this on any member of a search head cluster.
+// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#shcluster.2Fcaptain.2Finfo
+func (c *SplunkClient) GetSearchHeadCaptainInfo() (*SearchHeadCaptainInfo, error) {
+	apiResponse := struct {
+		Entry []struct {
+			Content SearchHeadCaptainInfo `json:"content"`
+		} `json:"entry"`
+	}{}
+	path := "/services/shcluster/captain/info"
+	err := c.Get(path, &apiResponse)
+	if err != nil {
+		return nil, err
+	}
+	if len(apiResponse.Entry) < 1 {
+		return nil, fmt.Errorf("Invalid response from %s%s", c.ManagementURI, path)
+	}
+	return &apiResponse.Entry[0].Content, nil
+}
+
+// SearchHeadCaptainMemberInfo represents the status of a search head cluster member (captain endpoint).
+// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#shcluster.2Fcaptain.2Fmembers
+type SearchHeadCaptainMemberInfo struct {
+	// Flag that indicates if this member can run scheduled searches.
+	Adhoc bool `json:"adhoc_searchhead"`
+
+	// Flag to indicate if this peer advertised that it needed a restart.
+	AdvertiseRestartRequired bool `json:"advertise_restart_required"`
+
+	// Number of artifacts on this peer.
+	ArtifactCount int `json:"artifact_count"`
+
+	// The host and management port advertised by this peer.
+	HostPortPair string `json:"host_port_pair"`
+
+	// True if this member is the SHC captain.
+	Captain bool `json:"is_captain"`
+
+	// Host and port of the kv store instance of this member.
+	KVStoreHostPort string `json:"kv_store_host_port"`
+
+	// The name for this member. Displayed on the Splunk Web manager page.
+	Label string `json:"label"`
+
+	// Timestamp for last heartbeat received from the peer
+	LastHeartbeat int64 `json:"last_heartbeat"`
+
+	// REST API endpoint for management
+	ManagementURI string `json:"mgmt_url"`
+
+	// URI of the current captain.
+	PeerSchemeHostPort string `json:"peer_scheme_host_port"`
+
+	// Used by the captain to keep track of pending jobs requested by the captain to this member.
+ PendingJobCount int `json:"pending_job_count"` + + // Number of replications this peer is part of, as either source or target. + ReplicationCount int `json:"replication_count"` + + // TCP port to listen for replicated data from another cluster member. + ReplicationPort int `json:"replication_port"` + + // Indicates whether to use SSL when sending replication data. + ReplicationUseSSL bool `json:"replication_use_ssl"` + + // Indicates the status of the member. + Status string `json:"status"` +} + +// GetSearchHeadCaptainMembers queries the search head captain for info about cluster members. +// You can only use this on a search head cluster captain. +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#shcluster.2Fcaptain.2Fmembers +func (c *SplunkClient) GetSearchHeadCaptainMembers() (map[string]SearchHeadCaptainMemberInfo, error) { + apiResponse := struct { + Entry []struct { + Content SearchHeadCaptainMemberInfo `json:"content"` + } `json:"entry"` + }{} + path := "/services/shcluster/captain/members" + err := c.Get(path, &apiResponse) + if err != nil { + return nil, err + } + + members := make(map[string]SearchHeadCaptainMemberInfo) + for _, e := range apiResponse.Entry { + members[e.Content.Label] = e.Content + } + + return members, nil +} + +// SearchHeadClusterMemberInfo represents the status of a search head cluster member. +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#shcluster.2Fmember.2Finfo +type SearchHeadClusterMemberInfo struct { + // Number of currently running historical searches. + ActiveHistoricalSearchCount int `json:"active_historical_search_count"` + + // Number of currently running realtime searches. + ActiveRealtimeSearchCount int `json:"active_realtime_search_count"` + + // Flag that indicates if this member can run scheduled searches. + Adhoc bool `json:"adhoc_searchhead"` + + // Indicates if this member is registered with the searchhead cluster captain. 
+ Registered bool `json:"is_registered"` + + // Timestamp for the last attempt to contact the captain. + LastHeartbeatAttempt int64 `json:"last_heartbeat_attempt"` + + // Number of scheduled searches run in the last 15 minutes. + PeerLoadStatsGla15m int `json:"peer_load_stats_gla_15m"` + + // Number of scheduled searches run in the last one minute. + PeerLoadStatsGla1m int `json:"peer_load_stats_gla_1m"` + + // Number of scheduled searches run in the last five minutes. + PeerLoadStatsGla5m int `json:"peer_load_stats_gla_5m"` + + // Indicates whether the member needs to be restarted to enable its searchhead cluster configuration. + RestartState string `json:"restart_state"` + + // Indicates the status of the member. + Status string `json:"status"` +} + +// GetSearchHeadClusterMemberInfo queries info from a search head cluster member. +// You can use this on any member of a search head cluster. +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#shcluster.2Fmember.2Finfo +func (c *SplunkClient) GetSearchHeadClusterMemberInfo() (*SearchHeadClusterMemberInfo, error) { + apiResponse := struct { + Entry []struct { + Content SearchHeadClusterMemberInfo `json:"content"` + } `json:"entry"` + }{} + path := "/services/shcluster/member/info" + err := c.Get(path, &apiResponse) + if err != nil { + return nil, err + } + if len(apiResponse.Entry) < 1 { + return nil, fmt.Errorf("Invalid response from %s%s", c.ManagementURI, path) + } + return &apiResponse.Entry[0].Content, nil +} + +// SetSearchHeadDetention enables or disables detention of a search head cluster member. +// You can use this on any member of a search head cluster. 
+// See https://docs.splunk.com/Documentation/Splunk/latest/DistSearch/SHdetention
+func (c *SplunkClient) SetSearchHeadDetention(detain bool) error {
+	mode := "off"
+	if detain {
+		mode = "on"
+	}
+	endpoint := fmt.Sprintf("%s/services/shcluster/member/control/control/set_manual_detention?manual_detention=%s", c.ManagementURI, mode)
+	request, err := http.NewRequest("POST", endpoint, nil)
+	if err != nil {
+		return err
+	}
+	return c.Do(request, 200, nil)
+}
+
+// RemoveSearchHeadClusterMember removes a search head cluster member.
+// You can use this on any member of a search head cluster.
+// See https://docs.splunk.com/Documentation/Splunk/latest/DistSearch/Removeaclustermember
+func (c *SplunkClient) RemoveSearchHeadClusterMember() error {
+	// send request to remove this member from the search head cluster consensus
+	endpoint := fmt.Sprintf("%s/services/shcluster/member/consensus/default/remove_server?output_mode=json", c.ManagementURI)
+	request, err := http.NewRequest("POST", endpoint, nil)
+	if err != nil {
+		return err
+	}
+
+	// send HTTP request and check status
+	request.SetBasicAuth(c.Username, c.Password)
+	response, err := c.Client.Do(request)
+	if err != nil {
+		return err
+	}
+	if response.StatusCode == 200 {
+		return nil
+	}
+	if response.StatusCode != 503 {
+		return fmt.Errorf("Response code=%d from %s; want %d", response.StatusCode, request.URL, 200)
+	}
+
+	// unmarshal the 503 response
+	apiResponse := struct {
+		Messages []struct {
+			Text string `json:"text"`
+		} `json:"messages"`
+	}{}
+	data, _ := ioutil.ReadAll(response.Body)
+	if len(data) == 0 {
+		return fmt.Errorf("Received 503 response with empty body from %s", request.URL)
+	}
+	err = json.Unmarshal(data, &apiResponse)
+	if err != nil {
+		return fmt.Errorf("Failed to unmarshal response from %s: %v", request.URL, err)
+	}
+
+	// check if request failed because member was already removed
+	if len(apiResponse.Messages) == 0 {
+		return fmt.Errorf("Received 503 response with empty Messages from %s",
request.URL)
+	}
+	msg1 := regexp.MustCompile(`Server .* is not part of configuration, hence cannot be removed`)
+	msg2 := regexp.MustCompile(`This node is not part of any cluster configuration`)
+	if msg1.Match([]byte(apiResponse.Messages[0].Text)) || msg2.Match([]byte(apiResponse.Messages[0].Text)) {
+		// it was already removed -> ignore error
+		return nil
+	}
+
+	return fmt.Errorf("Received unrecognized 503 response from %s", request.URL)
+}
+
+// ClusterBundleInfo represents the status of a configuration bundle.
+type ClusterBundleInfo struct {
+	// BundlePath is filesystem path to the file representing the bundle
+	BundlePath string `json:"bundle_path"`
+
+	// Checksum used to verify bundle integrity
+	Checksum string `json:"checksum"`
+
+	// Timestamp of the bundle
+	Timestamp int64 `json:"timestamp"`
+}
+
+// ClusterMasterInfo represents the status of the indexer cluster master.
+// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Finfo
+type ClusterMasterInfo struct {
+	// Indicates if the cluster is initialized.
+	Initialized bool `json:"initialized_flag"`
+
+	// Indicates if the cluster is ready for indexing.
+	IndexingReady bool `json:"indexing_ready_flag"`
+
+	// Indicates whether the master is ready to begin servicing, based on whether it is initialized.
+	ServiceReady bool `json:"service_ready_flag"`
+
+	// Indicates if the cluster is in maintenance mode.
+	MaintenanceMode bool `json:"maintenance_mode"`
+
+	// Indicates whether the master is restarting the peers in a cluster.
+	RollingRestart bool `json:"rolling_restart_flag"`
+
+	// The name for the master. Displayed in the Splunk Web manager page.
+	Label string `json:"label"`
+
+	// Provides information about the active bundle for this master.
+	ActiveBundle ClusterBundleInfo `json:"active_bundle"`
+
+	// The most recent information reflecting any changes made to the master-apps configuration bundle.
+	// In steady state, this is equal to active_bundle.
If it is not equal, then pushing the latest bundle to all peers is in process (or needs to be started). + LatestBundle ClusterBundleInfo `json:"latest_bundle"` + + // Timestamp corresponding to the creation of the master. + StartTime int64 `json:"start_time"` +} + +// GetClusterMasterInfo queries the cluster master for info about the indexer cluster. +// You can only use this on a cluster master. +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Finfo +func (c *SplunkClient) GetClusterMasterInfo() (*ClusterMasterInfo, error) { + apiResponse := struct { + Entry []struct { + Content ClusterMasterInfo `json:"content"` + } `json:"entry"` + }{} + path := "/services/cluster/master/info" + err := c.Get(path, &apiResponse) + if err != nil { + return nil, err + } + if len(apiResponse.Entry) < 1 { + return nil, fmt.Errorf("Invalid response from %s%s", c.ManagementURI, path) + } + return &apiResponse.Entry[0].Content, nil +} + +// IndexerClusterPeerInfo represents the status of a indexer cluster peer. +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fslave.2Finfo +type IndexerClusterPeerInfo struct { + // Current bundle being used by this peer. + ActiveBundle ClusterBundleInfo `json:"active_bundle"` + + // Lists information about the most recent bundle downloaded from the master. + LatestBundle ClusterBundleInfo `json:"latest_bundle"` + + // The initial bundle generation ID recognized by this peer. Any searches from previous generations fail. + // The initial bundle generation ID is created when a peer first comes online, restarts, or recontacts the master. + // Note that this is reported as a very large number (18446744073709552000) that breaks Go's JSON library, while the peer is being decommissioned. + //BaseGenerationID uint64 `json:"base_generation_id"` + + // Indicates if this peer is registered with the master in the cluster. 
+ Registered bool `json:"is_registered"` + + // Timestamp for the last attempt to contact the master. + LastHeartbeatAttempt int64 `json:"last_heartbeat_attempt"` + + // Indicates whether the peer needs to be restarted to enable its cluster configuration. + RestartState string `json:"restart_state"` + + // Indicates the status of the peer. + Status string `json:"status"` +} + +// GetIndexerClusterPeerInfo queries info from a indexer cluster peer. +// You can use this on any peer in an indexer cluster. +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fslave.2Finfo +func (c *SplunkClient) GetIndexerClusterPeerInfo() (*IndexerClusterPeerInfo, error) { + apiResponse := struct { + Entry []struct { + Content IndexerClusterPeerInfo `json:"content"` + } `json:"entry"` + }{} + path := "/services/cluster/slave/info" + err := c.Get(path, &apiResponse) + if err != nil { + return nil, err + } + if len(apiResponse.Entry) < 1 { + return nil, fmt.Errorf("Invalid response from %s%s", c.ManagementURI, path) + } + return &apiResponse.Entry[0].Content, nil +} + +// ClusterMasterPeerInfo represents the status of a indexer cluster peer (cluster master endpoint). +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Fpeers +type ClusterMasterPeerInfo struct { + // Unique identifier or GUID for the peer + ID string `json:"guid"` + + // The name for the peer. Displayed on the manager page. + Label string `json:"label"` + + // The ID of the configuration bundle currently being used by the master. + ActiveBundleID string `json:"active_bundle_id"` + + // The initial bundle generation ID recognized by this peer. Any searches from previous generations fail. + // The initial bundle generation ID is created when a peer first comes online, restarts, or recontacts the master. 
+	// Note that this is reported as a very large number (18446744073709552000) that breaks Go's JSON library, while the peer is being decommissioned.
+	//BaseGenerationID uint64 `json:"base_generation_id"`
+
+	// Count of the number of buckets on this peer, across all indexes.
+	BucketCount int64 `json:"bucket_count"`
+
+	// Count of the number of buckets by index on this peer.
+	BucketCountByIndex map[string]int64 `json:"bucket_count_by_index"`
+
+	// Flag indicating if this peer has started heartbeating.
+	HeartbeatStarted bool `json:"heartbeat_started"`
+
+	// The host and port advertised to peers for the data replication channel.
+	// Can be either of the form IP:port or hostname:port.
+	HostPortPair string `json:"host_port_pair"`
+
+	// Flag indicating if this peer belongs to the current committed generation and is searchable.
+	Searchable bool `json:"is_searchable"`
+
+	// Timestamp for last heartbeat received from the peer.
+	LastHeartbeat int64 `json:"last_heartbeat"`
+
+	// The ID of the configuration bundle this peer is using.
+	LatestBundleID string `json:"latest_bundle_id"`
+
+	// Used by the master to keep track of pending jobs requested by the master to this peer.
+	PendingJobCount int `json:"pending_job_count"`
+
+	// Number of buckets for which the peer is primary in its local site, or the number of buckets that return search results from same site as the peer.
+	PrimaryCount int64 `json:"primary_count"`
+
+	// Number of buckets for which the peer is primary that are not in its local site.
+	PrimaryCountRemote int64 `json:"primary_count_remote"`
+
+	// Number of replications this peer is part of, as either source or target.
+	ReplicationCount int `json:"replication_count"`
+
+	// TCP port to listen for replicated data from another cluster member.
+	ReplicationPort int `json:"replication_port"`
+
+	// Indicates whether to use SSL when sending replication data.
+	ReplicationUseSSL bool `json:"replication_use_ssl"`
+
+	// To which site the peer belongs.
+ Site string `json:"site"` + + // Indicates the status of the peer. + Status string `json:"status"` + + // Lists the number of buckets on the peer for each search state for the bucket. + SearchStateCounter struct { + Searchable int64 `json:"Searchable"` + Unsearchable int64 `json:"Unsearchable"` + PendingSearchable int64 `json:"PendingSearchable"` + SearchablePendingMask int64 `json:"SearchablePendingMask"` + } `json:"search_state_counter"` + + // Lists the number of buckets on the peer for each bucket status. + StatusCounter struct { + // complete (warm/cold) bucket + Complete int64 `json:"Complete"` + + // target of replication for already completed (warm/cold) bucket + NonStreamingTarget int64 `json:"NonStreamingTarget"` + + // bucket pending truncation + PendingTruncate int64 `json:"PendingTruncate"` + + // bucket pending discard + PendingDiscard int64 `json:"PendingDiscard"` + + // bucket that is not replicated + Standalone int64 `json:"Standalone"` + + // copy of streaming bucket where some error was encountered + StreamingError int64 `json:"StreamingError"` + + // streaming hot bucket on source side + StreamingSource int64 `json:"StreamingSource"` + + // streaming hot bucket copy on target side + StreamingTarget int64 `json:"StreamingTarget"` + + // uninitialized + Unset int64 `json:"Unset"` + } `json:"status_counter"` +} + +// GetClusterMasterPeers queries the cluster master for info about indexer cluster peers. +// You can only use this on a cluster master. 
+// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Fpeers
+func (c *SplunkClient) GetClusterMasterPeers() (map[string]ClusterMasterPeerInfo, error) {
+	apiResponse := struct {
+		Entry []struct {
+			Name string `json:"name"`
+			Content ClusterMasterPeerInfo `json:"content"`
+		} `json:"entry"`
+	}{}
+	path := "/services/cluster/master/peers"
+	err := c.Get(path, &apiResponse)
+	if err != nil {
+		return nil, err
+	}
+
+	peers := make(map[string]ClusterMasterPeerInfo)
+	for _, e := range apiResponse.Entry {
+		e.Content.ID = e.Name
+		peers[e.Content.Label] = e.Content
+	}
+
+	return peers, nil
+}
+
+// RemoveIndexerClusterPeer removes peer from an indexer cluster, where id=unique GUID for the peer.
+// You can only use this on a cluster master.
+// See https://docs.splunk.com/Documentation/Splunk/8.0.2/Indexer/Removepeerfrommasterlist
+func (c *SplunkClient) RemoveIndexerClusterPeer(id string) error {
+	// send request to the cluster master to remove the peer from the indexer cluster
+	endpoint := fmt.Sprintf("%s/services/cluster/master/control/control/remove_peers?peers=%s", c.ManagementURI, id)
+	request, err := http.NewRequest("POST", endpoint, nil)
+	if err != nil {
+		return err
+	}
+	return c.Do(request, 200, nil)
+}
+
+// DecommissionIndexerClusterPeer takes an indexer cluster peer offline using the decommission endpoint.
+// You can use this on any peer in an indexer cluster.
+// See https://docs.splunk.com/Documentation/Splunk/latest/Indexer/Takeapeeroffline +func (c *SplunkClient) DecommissionIndexerClusterPeer(enforceCounts bool) error { + enforceCountsAsInt := 0 + if enforceCounts { + enforceCountsAsInt = 1 + } + endpoint := fmt.Sprintf("%s/services/cluster/slave/control/control/decommission?enforce_counts=%d", c.ManagementURI, enforceCountsAsInt) + request, err := http.NewRequest("POST", endpoint, nil) + if err != nil { + return err + } + return c.Do(request, 200, nil) +} diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go new file mode 100644 index 000000000..2aacad891 --- /dev/null +++ b/pkg/splunk/client/enterprise_test.go @@ -0,0 +1,345 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "net/http" + "testing" + + spltest "github.com/splunk/splunk-operator/pkg/splunk/test" +) + +func splunkClientTester(t *testing.T, testMethod string, status int, body string, wantRequest *http.Request, test func(SplunkClient) error) { + mockSplunkClient := &spltest.MockHTTPClient{} + mockSplunkClient.AddHandler(wantRequest, status, body, nil) + c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd") + c.Client = mockSplunkClient + err := test(*c) + if err != nil { + t.Errorf("%s err = %v", testMethod, err) + } + mockSplunkClient.CheckRequests(t, testMethod) +} + +func TestGetSearchHeadCaptainInfo(t *testing.T) { + wantRequest, _ := http.NewRequest("GET", "https://localhost:8089/services/shcluster/captain/info?count=0&output_mode=json", nil) + wantCaptainLabel := "splunk-s2-search-head-0" + test := func(c SplunkClient) error { + captainInfo, err := c.GetSearchHeadCaptainInfo() + if err != nil { + return err + } + if captainInfo.Label != wantCaptainLabel { + t.Errorf("captainInfo.Label=%s; want %s", captainInfo.Label, wantCaptainLabel) + } + return nil + } + body := 
`{"links":{},"origin":"https://localhost:8089/services/shcluster/captain/info","updated":"2020-03-15T16:36:42+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"captain","id":"https://localhost:8089/services/shcluster/captain/info/captain","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/info/captain","list":"/services/shcluster/captain/info/captain"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"eai:acl":null,"elected_captain":1584139352,"id":"A9D5FCCF-EB93-4E0A-93E1-45B56483EA7A","initialized_flag":true,"label":"splunk-s2-search-head-0","maintenance_mode":false,"mgmt_uri":"https://splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","min_peers_joined_flag":true,"peer_scheme_host_port":"https://splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","rolling_restart_flag":false,"service_ready_flag":true,"start_time":1584139291}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetSearchHeadCaptainInfo", 200, body, wantRequest, test) + + // test body with no entries + test = func(c SplunkClient) error { + _, err := c.GetSearchHeadCaptainInfo() + if err == nil { + t.Errorf("GetSearchHeadCaptainInfo returned nil; want error") + } + return nil + } + body = `{"links":{},"origin":"https://localhost:8089/services/shcluster/captain/info","updated":"2020-03-15T16:36:42+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[]}` + splunkClientTester(t, "TestGetSearchHeadCaptainInfo", 200, body, wantRequest, test) + + // test empty body + splunkClientTester(t, "TestGetSearchHeadCaptainInfo", 200, "", wantRequest, test) + + // test error code + splunkClientTester(t, "TestGetSearchHeadCaptainInfo", 
500, "", wantRequest, test) +} + +func TestGetSearchHeadClusterMemberInfo(t *testing.T) { + wantRequest, _ := http.NewRequest("GET", "https://localhost:8089/services/shcluster/member/info?count=0&output_mode=json", nil) + wantMemberStatus := "Up" + test := func(c SplunkClient) error { + memberInfo, err := c.GetSearchHeadClusterMemberInfo() + if err != nil { + return err + } + if memberInfo.Status != wantMemberStatus { + t.Errorf("memberInfo.Status=%s; want %s", memberInfo.Status, wantMemberStatus) + } + return nil + } + body := `{"links":{},"origin":"https://localhost:8089/services/shcluster/member/info","updated":"2020-03-15T16:30:38+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"member","id":"https://localhost:8089/services/shcluster/member/info/member","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/member/info/member","list":"/services/shcluster/member/info/member"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_historical_search_count":0,"active_realtime_search_count":0,"adhoc_searchhead":false,"eai:acl":null,"is_registered":true,"last_heartbeat_attempt":1584289836,"maintenance_mode":false,"no_artifact_replications":false,"peer_load_stats_gla_15m":0,"peer_load_stats_gla_1m":0,"peer_load_stats_gla_5m":0,"peer_load_stats_max_runtime":0,"peer_load_stats_num_autosummary":0,"peer_load_stats_num_historical":0,"peer_load_stats_num_realtime":0,"peer_load_stats_num_running":0,"peer_load_stats_total_runtime":0,"restart_state":"NoRestart","status":"Up"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetSearchHeadClusterMemberInfo", 200, body, wantRequest, test) + + // test body with no entries + test = func(c SplunkClient) error { + _, err := 
c.GetSearchHeadClusterMemberInfo() + if err == nil { + t.Errorf("GetSearchHeadClusterMemberInfo returned nil; want error") + } + return nil + } + body = `{"links":{},"origin":"https://localhost:8089/services/shcluster/captain/info","updated":"2020-03-15T16:36:42+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[]}` + splunkClientTester(t, "TestGetSearchHeadCaptainInfo", 200, body, wantRequest, test) + + // test empty body + splunkClientTester(t, "TestGetSearchHeadClusterMemberInfo", 200, "", wantRequest, test) + + // test error code + splunkClientTester(t, "TestGetSearchHeadClusterMemberInfo", 500, "", wantRequest, test) +} + +func TestGetSearchHeadCaptainMembers(t *testing.T) { + wantRequest, _ := http.NewRequest("GET", "https://localhost:8089/services/shcluster/captain/members?count=0&output_mode=json", nil) + wantMembers := []string{ + "splunk-s2-search-head-0", "splunk-s2-search-head-1", "splunk-s2-search-head-2", "splunk-s2-search-head-3", "splunk-s2-search-head-4", + } + wantStatus := "Up" + wantCaptain := "splunk-s2-search-head-0" + test := func(c SplunkClient) error { + members, err := c.GetSearchHeadCaptainMembers() + if err != nil { + return err + } + if len(members) != len(wantMembers) { + t.Errorf("len(members)=%d; want %d", len(members), len(wantMembers)) + } + for n := range wantMembers { + member, ok := members[wantMembers[n]] + if !ok { + t.Errorf("wanted member not found: %s", wantMembers[n]) + } + if member.Status != wantStatus { + t.Errorf("member %s want Status=%s: got %s", wantMembers[n], member.Status, wantStatus) + } + if member.Captain { + if wantMembers[n] != wantCaptain { + t.Errorf("member %s want Captain=%t: got %t", wantMembers[n], false, true) + } + } else { + if wantMembers[n] == wantCaptain { + t.Errorf("member %s want Captain=%t: got %t", wantMembers[n], true, false) + } + } + } + return nil + } + body := 
`{"links":{"create":"/services/shcluster/captain/members/_new"},"origin":"https://localhost:8089/services/shcluster/captain/members","updated":"2020-03-15T16:40:20+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"7D571849-CD52-48F4-B76A-E83C4E86E300","id":"https://localhost:8089/services/shcluster/captain/members/7D571849-CD52-48F4-B76A-E83C4E86E300","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/members/7D571849-CD52-48F4-B76A-E83C4E86E300","list":"/services/shcluster/captain/members/7D571849-CD52-48F4-B76A-E83C4E86E300","edit":"/services/shcluster/captain/members/7D571849-CD52-48F4-B76A-E83C4E86E300"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"adhoc_searchhead":false,"advertise_restart_required":false,"artifact_count":2,"delayed_artifacts_to_discard":[],"eai:acl":null,"fixup_set":[],"host_port_pair":"10.42.0.3:8089","is_captain":false,"kv_store_host_port":"splunk-s2-search-head-2.splunk-s2-search-head-headless.splunk.svc.cluster.local:8191","label":"splunk-s2-search-head-2","last_heartbeat":1584290418,"mgmt_uri":"https://splunk-s2-search-head-2.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","no_artifact_replications":false,"peer_scheme_host_port":"https://10.42.0.3:8089","pending_job_count":0,"preferred_captain":false,"replication_count":0,"replication_port":9887,"replication_use_ssl":false,"site":"default","status":"Up","status_counter":{"Complete":2,"NonStreamingTarget":0,"PendingDiscard":0}}},{"name":"90D7E074-9880-4867-BAA1-31A74EC28DC0","id":"https://localhost:8089/services/shcluster/captain/members/90D7E074-9880-4867-BAA1-31A74EC28DC0","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/members/90D7E074-9880-4867-BAA1-31A74EC28DC0","list":"/ser
vices/shcluster/captain/members/90D7E074-9880-4867-BAA1-31A74EC28DC0","edit":"/services/shcluster/captain/members/90D7E074-9880-4867-BAA1-31A74EC28DC0"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"adhoc_searchhead":false,"advertise_restart_required":false,"artifact_count":0,"delayed_artifacts_to_discard":[],"eai:acl":null,"fixup_set":[],"host_port_pair":"10.42.0.2:8089","is_captain":true,"kv_store_host_port":"splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8191","label":"splunk-s2-search-head-0","last_heartbeat":1584290416,"mgmt_uri":"https://splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","no_artifact_replications":false,"peer_scheme_host_port":"https://10.42.0.2:8089","pending_job_count":0,"preferred_captain":true,"replication_count":0,"replication_port":9887,"replication_use_ssl":false,"site":"default","status":"Up","status_counter":{"Complete":0,"NonStreamingTarget":0,"PendingDiscard":0}}},{"name":"97B56FAE-E9C9-4B12-8B1E-A428E7859417","id":"https://localhost:8089/services/shcluster/captain/members/97B56FAE-E9C9-4B12-8B1E-A428E7859417","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/members/97B56FAE-E9C9-4B12-8B1E-A428E7859417","list":"/services/shcluster/captain/members/97B56FAE-E9C9-4B12-8B1E-A428E7859417","edit":"/services/shcluster/captain/members/97B56FAE-E9C9-4B12-8B1E-A428E7859417"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"adhoc_searchhead":false,"advertise_restart_required":false,"artifact_count":1,"delayed_artifacts_to_discard":[],"eai:acl":null,"fixup_set":
[],"host_port_pair":"10.36.0.7:8089","is_captain":false,"kv_store_host_port":"splunk-s2-search-head-1.splunk-s2-search-head-headless.splunk.svc.cluster.local:8191","label":"splunk-s2-search-head-1","last_heartbeat":1584290418,"mgmt_uri":"https://splunk-s2-search-head-1.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","no_artifact_replications":false,"peer_scheme_host_port":"https://10.36.0.7:8089","pending_job_count":0,"preferred_captain":false,"replication_count":0,"replication_port":9887,"replication_use_ssl":false,"site":"default","status":"Up","status_counter":{"Complete":1,"NonStreamingTarget":0,"PendingDiscard":0}}},{"name":"AA55C39A-5A3A-47CC-BF2C-2B60F0F6C561","id":"https://localhost:8089/services/shcluster/captain/members/AA55C39A-5A3A-47CC-BF2C-2B60F0F6C561","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/members/AA55C39A-5A3A-47CC-BF2C-2B60F0F6C561","list":"/services/shcluster/captain/members/AA55C39A-5A3A-47CC-BF2C-2B60F0F6C561","edit":"/services/shcluster/captain/members/AA55C39A-5A3A-47CC-BF2C-2B60F0F6C561"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"adhoc_searchhead":false,"advertise_restart_required":false,"artifact_count":1,"delayed_artifacts_to_discard":[],"eai:acl":null,"fixup_set":[],"host_port_pair":"10.42.0.5:8089","is_captain":false,"kv_store_host_port":"splunk-s2-search-head-4.splunk-s2-search-head-headless.splunk.svc.cluster.local:8191","label":"splunk-s2-search-head-4","last_heartbeat":1584290417,"mgmt_uri":"https://splunk-s2-search-head-4.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","no_artifact_replications":false,"peer_scheme_host_port":"https://10.42.0.5:8089","pending_job_count":0,"preferred_captain":false,"replication_count":0,"replication_port":9887,"replication_use_ssl":
false,"site":"default","status":"Up","status_counter":{"Complete":1,"NonStreamingTarget":0,"PendingDiscard":0}}},{"name":"E271B238-921F-4F6E-BD99-E110EB7B0FDA","id":"https://localhost:8089/services/shcluster/captain/members/E271B238-921F-4F6E-BD99-E110EB7B0FDA","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/members/E271B238-921F-4F6E-BD99-E110EB7B0FDA","list":"/services/shcluster/captain/members/E271B238-921F-4F6E-BD99-E110EB7B0FDA","edit":"/services/shcluster/captain/members/E271B238-921F-4F6E-BD99-E110EB7B0FDA"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"adhoc_searchhead":false,"advertise_restart_required":false,"artifact_count":2,"delayed_artifacts_to_discard":[],"eai:acl":null,"fixup_set":[],"host_port_pair":"10.40.0.4:8089","is_captain":false,"kv_store_host_port":"splunk-s2-search-head-3.splunk-s2-search-head-headless.splunk.svc.cluster.local:8191","label":"splunk-s2-search-head-3","last_heartbeat":1584290420,"mgmt_uri":"https://splunk-s2-search-head-3.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","no_artifact_replications":false,"peer_scheme_host_port":"https://10.40.0.4:8089","pending_job_count":0,"preferred_captain":false,"replication_count":0,"replication_port":9887,"replication_use_ssl":false,"site":"default","status":"Up","status_counter":{"Complete":2,"NonStreamingTarget":0,"PendingDiscard":0}}}],"paging":{"total":5,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetSearchHeadCaptainMembers", 200, body, wantRequest, test) + + // test error response + test = func(c SplunkClient) error { + _, err := c.GetSearchHeadCaptainMembers() + if err == nil { + t.Errorf("GetSearchHeadCaptainMembers returned nil; want error") + } + return nil + } + splunkClientTester(t, 
"TestGetSearchHeadCaptainMembers", 503, "", wantRequest, test) +} + +func TestSetSearchHeadDetention(t *testing.T) { + wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/shcluster/member/control/control/set_manual_detention?manual_detention=on", nil) + test := func(c SplunkClient) error { + return c.SetSearchHeadDetention(true) + } + splunkClientTester(t, "TestSetSearchHeadDetention", 200, "", wantRequest, test) +} + +func TestRemoveSearchHeadClusterMember(t *testing.T) { + // test for 200 response first (sent on first removal request) + wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/shcluster/member/consensus/default/remove_server?output_mode=json", nil) + test := func(c SplunkClient) error { + return c.RemoveSearchHeadClusterMember() + } + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 200, "", wantRequest, test) + + // next test 503 error message (sent for short period after removal, while SH is updating itself) + body := `{"messages":[{"type":"ERROR","text":"Failed to proxy call to member https://splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089. ERROR: Server https://splunk-s2-search-head-3.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089 is not part of configuration, hence cannot be removed. Check configuration by making GET request onto /services/shcluster/member/consensus"}]}` + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 503, body, wantRequest, test) + + // check alternate 503 message (sent after SH has completed removal) + body = `{"messages":[{"type":"ERROR","text":"This node is not part of any cluster configuration, please re-run the command from an active cluster member. 
Also see \"splunk add shcluster-member\" to add this member to an existing cluster or see \"splunk bootstrap shcluster-captain\" to bootstrap a new cluster with this member."}]}` + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 503, body, wantRequest, test) + + // test unrecognized response message + test = func(c SplunkClient) error { + err := c.RemoveSearchHeadClusterMember() + if err == nil { + t.Errorf("RemoveSearchHeadClusterMember returned nil; want error") + } + return nil + } + body = `{"messages":[{"type":"ERROR","text":"Nothing that we are expecting."}]}` + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 503, body, wantRequest, test) + + // test empty messages array in response + body = `{"messages":[]}` + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 503, body, wantRequest, test) + + // test unmarshal failure + body = `` + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 503, body, wantRequest, test) + + // test empty response + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 503, "", wantRequest, test) + + // test bad response code + splunkClientTester(t, "TestRemoveSearchHeadClusterMember", 404, "", wantRequest, test) +} + +func TestGetClusterMasterInfo(t *testing.T) { + wantRequest, _ := http.NewRequest("GET", "https://localhost:8089/services/cluster/master/info?count=0&output_mode=json", nil) + wantInfo := ClusterMasterInfo{ + Initialized: true, + IndexingReady: true, + ServiceReady: true, + MaintenanceMode: false, + RollingRestart: false, + Label: "splunk-s1-cluster-master-0", + ActiveBundle: ClusterBundleInfo{ + BundlePath: "/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle", + Checksum: "14310A4AABD23E85BBD4559C4A3B59F8", + Timestamp: 1583870198, + }, + LatestBundle: ClusterBundleInfo{ + BundlePath: "/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle", + Checksum: 
"14310A4AABD23E85BBD4559C4A3B59F8", + Timestamp: 1583870198, + }, + StartTime: 1583948636, + } + test := func(c SplunkClient) error { + gotInfo, err := c.GetClusterMasterInfo() + if err != nil { + return err + } + if *gotInfo != wantInfo { + t.Errorf("info.Status=%v; want %v", *gotInfo, wantInfo) + } + return nil + } + body := `{"links":{},"origin":"https://localhost:8089/services/cluster/master/info","updated":"2020-03-18T01:04:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"master","id":"https://localhost:8089/services/cluster/master/info/master","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/info/master","list":"/services/cluster/master/info/master"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"apply_bundle_status":{"invalid_bundle":{"bundle_path":"","bundle_validation_errors_on_master":[],"checksum":"","timestamp":0},"reload_bundle_issued":false,"status":"None"},"backup_and_restore_primaries":false,"controlled_rolling_restart_flag":false,"eai:acl":null,"indexing_ready_flag":true,"initialized_flag":true,"label":"splunk-s1-cluster-master-0","last_check_restart_bundle_result":false,"last_dry_run_bundle":{"bundle_path":"","checksum":"","timestamp":0},"last_validated_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/0af7c0e95f313f7be3b0cb1d878df9a1-1583948640.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","is_valid_bundle":true,"timestamp":1583948640},"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.b
undle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"maintenance_mode":false,"multisite":false,"previous_active_bundle":{"bundle_path":"","checksum":"","timestamp":0},"primaries_backup_status":"No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.","quiet_period_flag":false,"rolling_restart_flag":false,"rolling_restart_or_upgrade":false,"service_ready_flag":true,"start_time":1583948636,"summary_replication":"false"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetClusterMasterInfo", 200, body, wantRequest, test) + + // test body with no entries + test = func(c SplunkClient) error { + _, err := c.GetClusterMasterInfo() + if err == nil { + t.Errorf("GetClusterMasterInfo returned nil; want error") + } + return nil + } + body = `{"links":{},"origin":"https://localhost:8089/services/cluster/master/info","updated":"2020-03-18T01:04:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetClusterMasterInfo", 200, body, wantRequest, test) + + // test error code + splunkClientTester(t, "TestGetClusterMasterInfo", 500, "", wantRequest, test) +} + +func TestGetIndexerClusterPeerInfo(t *testing.T) { + wantRequest, _ := http.NewRequest("GET", "https://localhost:8089/services/cluster/slave/info?count=0&output_mode=json", nil) + wantMemberStatus := "Up" + test := func(c SplunkClient) error { + info, err := c.GetIndexerClusterPeerInfo() + if err != nil { + return err + } + if info.Status != wantMemberStatus { + t.Errorf("info.Status=%s; want %s", info.Status, wantMemberStatus) + } + return nil + } + body := 
`{"links":{},"origin":"https://localhost:8089/services/cluster/slave/info","updated":"2020-03-18T01:28:18+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"slave","id":"https://localhost:8089/services/cluster/slave/info/slave","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/slave/info/slave","list":"/services/cluster/slave/info/slave"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/87c8c24e7fabc3ff9683c26652cb5890-1583870244.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870244},"base_generation_id":26,"eai:acl":null,"is_registered":true,"last_dry_run_bundle":{"bundle_path":"","checksum":"","timestamp":0},"last_heartbeat_attempt":0,"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/87c8c24e7fabc3ff9683c26652cb5890-1583870244.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870244},"maintenance_mode":false,"registered_summary_state":3,"restart_state":"NoRestart","site":"default","status":"Up"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetIndexerClusterPeerInfo", 200, body, wantRequest, test) + + // test body with no entries + test = func(c SplunkClient) error { + _, err := c.GetIndexerClusterPeerInfo() + if err == nil { + t.Errorf("GetIndexerClusterPeerInfo returned nil; want error") + } + return nil + } + body = `{"links":{},"origin":"https://localhost:8089/services/cluster/slave/info","updated":"2020-03-18T01:28:18+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetIndexerClusterPeerInfo", 200, body, 
wantRequest, test) + + // test error code + splunkClientTester(t, "TestGetIndexerClusterPeerInfo", 500, "", wantRequest, test) +} + +func TestGetClusterMasterPeers(t *testing.T) { + wantRequest, _ := http.NewRequest("GET", "https://localhost:8089/services/cluster/master/peers?count=0&output_mode=json", nil) + var wantPeers = []struct { + ID string + Label string + Status string + }{ + {ID: "D39B1729-E2C5-4273-B9B2-534DA7C2F866", Label: "splunk-s1-indexer-0", Status: "Up"}, + } + test := func(c SplunkClient) error { + peers, err := c.GetClusterMasterPeers() + if err != nil { + return err + } + if len(peers) != len(wantPeers) { + t.Errorf("len(peers)=%d; want %d", len(peers), len(wantPeers)) + } + for n := range wantPeers { + p, ok := peers[wantPeers[n].Label] + if !ok { + t.Errorf("wanted peer not found: %s", wantPeers[n].Label) + } + if p.ID != wantPeers[n].ID { + t.Errorf("peer %s want ID=%s: got %s", wantPeers[n].Label, wantPeers[n].ID, p.ID) + } + if p.Label != wantPeers[n].Label { + t.Errorf("peer %s want Label=%s: got %s", wantPeers[n].Label, wantPeers[n].Label, p.Label) + } + if p.Status != wantPeers[n].Status { + t.Errorf("peer %s want Status=%s: got %s", wantPeers[n].Label, wantPeers[n].Status, p.Status) + } + } + return nil + } + body := 
`{"links":{"create":"/services/cluster/master/peers/_new"},"origin":"https://localhost:8089/services/cluster/master/peers","updated":"2020-03-18T01:08:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"D39B1729-E2C5-4273-B9B2-534DA7C2F866","id":"https://localhost:8089/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","list":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","edit":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","apply_bundle_status":{"invalid_bundle":{"bundle_validation_errors":[],"invalid_bundle_id":""},"reasons_for_restart":[],"restart_required_for_apply_bundle":false,"status":"None"},"base_generation_id":26,"bucket_count":73,"bucket_count_by_index":{"_audit":24,"_internal":45,"_telemetry":4},"buckets_rf_by_origin_site":{"default":73},"buckets_sf_by_origin_site":{"default":73},"delayed_buckets_to_discard":[],"eai:acl":null,"fixup_set":[],"heartbeat_started":true,"host_port_pair":"10.36.0.6:8089","indexing_disk_space":210707374080,"is_searchable":true,"is_valid_bundle":true,"label":"splunk-s1-indexer-0","last_dry_run_bundle":"","last_heartbeat":1584493732,"last_validated_bundle":"14310A4AABD23E85BBD4559C4A3B59F8","latest_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","peer_registered_summaries":true,"pending_builds":[],"pending_job_count":0,"primary_count":73,"primary_count_remote":0,"register_search_address":"10.36.0.6:8089","replication_count":0,"replication_port":9887,"replication_use_ssl":false,"restart_required_for_applying_dry_run_bundle":
false,"search_state_counter":{"PendingSearchable":0,"Searchable":73,"SearchablePendingMask":0,"Unsearchable":0},"site":"default","splunk_version":"8.0.2","status":"Up","status_counter":{"Complete":69,"NonStreamingTarget":0,"StreamingSource":4,"StreamingTarget":0},"summary_replication_count":0}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}` + splunkClientTester(t, "TestGetClusterMasterPeers", 200, body, wantRequest, test) + + // test error response + test = func(c SplunkClient) error { + _, err := c.GetClusterMasterPeers() + if err == nil { + t.Errorf("GetClusterMasterPeers returned nil; want error") + } + return nil + } + splunkClientTester(t, "TestGetClusterMasterPeers", 503, "", wantRequest, test) +} + +func TestRemoveIndexerClusterPeer(t *testing.T) { + wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/cluster/master/control/control/remove_peers?peers=D39B1729-E2C5-4273-B9B2-534DA7C2F866", nil) + test := func(c SplunkClient) error { + return c.RemoveIndexerClusterPeer("D39B1729-E2C5-4273-B9B2-534DA7C2F866") + } + splunkClientTester(t, "TestRemoveIndexerClusterPeer", 200, "", wantRequest, test) +} + +func TestDecommissionIndexerClusterPeer(t *testing.T) { + wantRequest, _ := http.NewRequest("POST", "https://localhost:8089/services/cluster/slave/control/control/decommission?enforce_counts=1", nil) + test := func(c SplunkClient) error { + return c.DecommissionIndexerClusterPeer(true) + } + splunkClientTester(t, "TestDecommissionIndexerClusterPeer", 200, "", wantRequest, test) +} diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go index df1d41cbb..1a2c5da26 100644 --- a/pkg/splunk/enterprise/configuration.go +++ b/pkg/splunk/enterprise/configuration.go @@ -37,12 +37,12 @@ func getSplunkVolumeClaims(cr enterprisev1.MetaObject, spec *enterprisev1.Common var etcStorage, varStorage resource.Quantity var err error - etcStorage, err = resources.ParseResourceQuantity(spec.EtcStorage, "1Gi") + 
etcStorage, err = resources.ParseResourceQuantity(spec.EtcStorage, "10Gi") if err != nil { return []corev1.PersistentVolumeClaim{}, fmt.Errorf("%s: %s", "etcStorage", err) } - varStorage, err = resources.ParseResourceQuantity(spec.VarStorage, "200Gi") + varStorage, err = resources.ParseResourceQuantity(spec.VarStorage, "100Gi") if err != nil { return []corev1.PersistentVolumeClaim{}, fmt.Errorf("%s: %s", "varStorage", err) } @@ -107,7 +107,7 @@ func GetStandaloneStatefulSet(cr *enterprisev1.Standalone) (*appsv1.StatefulSet, } // GetSearchHeadStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise search heads. -func GetSearchHeadStatefulSet(cr *enterprisev1.SearchHead) (*appsv1.StatefulSet, error) { +func GetSearchHeadStatefulSet(cr *enterprisev1.SearchHeadCluster) (*appsv1.StatefulSet, error) { // get search head env variables with deployer env := getSearchHeadExtraEnv(cr, cr.Spec.Replicas) @@ -131,17 +131,17 @@ func GetSearchHeadStatefulSet(cr *enterprisev1.SearchHead) (*appsv1.StatefulSet, } // GetIndexerStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise indexers. -func GetIndexerStatefulSet(cr *enterprisev1.Indexer) (*appsv1.StatefulSet, error) { +func GetIndexerStatefulSet(cr *enterprisev1.IndexerCluster) (*appsv1.StatefulSet, error) { return getSplunkStatefulSet(cr, &cr.Spec.CommonSplunkSpec, SplunkIndexer, cr.Spec.Replicas, getIndexerExtraEnv(cr, cr.Spec.Replicas)) } // GetClusterMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license master. -func GetClusterMasterStatefulSet(cr *enterprisev1.Indexer) (*appsv1.StatefulSet, error) { +func GetClusterMasterStatefulSet(cr *enterprisev1.IndexerCluster) (*appsv1.StatefulSet, error) { return getSplunkStatefulSet(cr, &cr.Spec.CommonSplunkSpec, SplunkClusterMaster, 1, getIndexerExtraEnv(cr, cr.Spec.Replicas)) } // GetDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license master. 
-func GetDeployerStatefulSet(cr *enterprisev1.SearchHead) (*appsv1.StatefulSet, error) { +func GetDeployerStatefulSet(cr *enterprisev1.SearchHeadCluster) (*appsv1.StatefulSet, error) { return getSplunkStatefulSet(cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas)) } @@ -217,16 +217,16 @@ func validateCommonSplunkSpec(spec *enterprisev1.CommonSplunkSpec) error { return resources.ValidateCommonSpec(&spec.CommonSpec, defaultResources) } -// ValidateIndexerSpec checks validity and makes default updates to a IndexerSpec, and returns error if something is wrong. -func ValidateIndexerSpec(spec *enterprisev1.IndexerSpec) error { +// ValidateIndexerClusterSpec checks validity and makes default updates to a IndexerClusterSpec, and returns error if something is wrong. +func ValidateIndexerClusterSpec(spec *enterprisev1.IndexerClusterSpec) error { if spec.Replicas == 0 { spec.Replicas = 1 } return validateCommonSplunkSpec(&spec.CommonSplunkSpec) } -// ValidateSearchHeadSpec checks validity and makes default updates to a SearchHeadSpec, and returns error if something is wrong. -func ValidateSearchHeadSpec(spec *enterprisev1.SearchHeadSpec) error { +// ValidateSearchHeadClusterSpec checks validity and makes default updates to a SearchHeadClusterSpec, and returns error if something is wrong. +func ValidateSearchHeadClusterSpec(spec *enterprisev1.SearchHeadClusterSpec) error { if spec.Replicas < 3 { spec.Replicas = 3 } @@ -462,10 +462,9 @@ func addDFCToPodTemplate(podTemplateSpec *corev1.PodTemplateSpec, sparkRef corev } // getSplunkStatefulSet returns a Kubernetes StatefulSet object for Splunk instances configured for a Splunk Enterprise resource. 
-func getSplunkStatefulSet(cr enterprisev1.MetaObject, spec *enterprisev1.CommonSplunkSpec, instanceType InstanceType, replicas int, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) { +func getSplunkStatefulSet(cr enterprisev1.MetaObject, spec *enterprisev1.CommonSplunkSpec, instanceType InstanceType, replicas int32, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) { // prepare misc values - replicas32 := int32(replicas) ports := resources.SortContainerPorts(getSplunkContainerPorts(instanceType)) // note that port order is important for tests annotations := resources.GetIstioAnnotations(ports) selectLabels := getSplunkLabels(cr.GetIdentifier(), instanceType) @@ -501,8 +500,11 @@ func getSplunkStatefulSet(cr enterprisev1.MetaObject, spec *enterprisev1.CommonS MatchLabels: selectLabels, }, ServiceName: GetSplunkServiceName(instanceType, cr.GetIdentifier(), true), - Replicas: &replicas32, - PodManagementPolicy: "Parallel", + Replicas: &replicas, + PodManagementPolicy: appsv1.ParallelPodManagement, + UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ + Type: appsv1.OnDeleteStatefulSetStrategyType, + }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: labels, @@ -651,10 +653,10 @@ func updateSplunkPodTemplateWithConfig(podTemplateSpec *corev1.PodTemplateSpec, var clusterMasterURL string if instanceType == SplunkIndexer { clusterMasterURL = GetSplunkServiceName(SplunkClusterMaster, cr.GetIdentifier(), false) - } else if instanceType != SplunkClusterMaster && spec.IndexerRef.Name != "" { - clusterMasterURL = GetSplunkServiceName(SplunkClusterMaster, spec.IndexerRef.Name, false) - if spec.IndexerRef.Namespace != "" { - clusterMasterURL = resources.GetServiceFQDN(spec.IndexerRef.Namespace, clusterMasterURL) + } else if instanceType != SplunkClusterMaster && spec.IndexerClusterRef.Name != "" { + clusterMasterURL = GetSplunkServiceName(SplunkClusterMaster, spec.IndexerClusterRef.Name, false) + if spec.IndexerClusterRef.Namespace != "" { + 
clusterMasterURL = resources.GetServiceFQDN(spec.IndexerClusterRef.Namespace, clusterMasterURL) } } if clusterMasterURL != "" { @@ -677,7 +679,7 @@ func updateSplunkPodTemplateWithConfig(podTemplateSpec *corev1.PodTemplateSpec, } // getSearchHeadExtraEnv returns extra environment variables used by search head clusters -func getSearchHeadExtraEnv(cr enterprisev1.MetaObject, replicas int) []corev1.EnvVar { +func getSearchHeadExtraEnv(cr enterprisev1.MetaObject, replicas int32) []corev1.EnvVar { return []corev1.EnvVar{ { Name: "SPLUNK_SEARCH_HEAD_URL", @@ -690,7 +692,7 @@ func getSearchHeadExtraEnv(cr enterprisev1.MetaObject, replicas int) []corev1.En } // getIndexerExtraEnv returns extra environment variables used by search head clusters -func getIndexerExtraEnv(cr enterprisev1.MetaObject, replicas int) []corev1.EnvVar { +func getIndexerExtraEnv(cr enterprisev1.MetaObject, replicas int32) []corev1.EnvVar { return []corev1.EnvVar{ { Name: "SPLUNK_INDEXER_URL", diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index d55429ab2..aabfa2101 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -41,7 +41,7 @@ func configTester(t *testing.T, method string, f func() (interface{}, error), wa } func TestGetIndexerStatefulSet(t *testing.T) { - cr := enterprisev1.Indexer{ + cr := enterprisev1.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -50,19 +50,19 @@ func TestGetIndexerStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := ValidateIndexerSpec(&cr.Spec); err != nil { - t.Errorf("ValidateIndexerSpec() returned error: %v", err) + if err := ValidateIndexerClusterSpec(&cr.Spec); err != nil { + t.Errorf("ValidateIndexerClusterSpec() returned error: %v", err) } return GetIndexerStatefulSet(&cr) } configTester(t, "GetIndexerStatefulSet()", f, want) } - 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-indexer","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_indexer"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack1-cluster-master-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/sp
lunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-indexer"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-indexer-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-indexer","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_indexer"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack1-cluster-master-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/sp
lunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-indexer"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-indexer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"indexer","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-indexer-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } func TestGetSearchHeadStatefulSet(t *testing.T) { - cr := enterprisev1.SearchHead{ + cr := enterprisev1.SearchHeadCluster{ ObjectMeta: metav1.ObjectMeta{ Name: 
"stack1", Namespace: "test", @@ -71,8 +71,8 @@ func TestGetSearchHeadStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := ValidateSearchHeadSpec(&cr.Spec); err != nil { - t.Errorf("ValidateSearchHeadSpec() returned error: %v", err) + if err := ValidateSearchHeadClusterSpec(&cr.Spec); err != nil { + t.Errorf("ValidateSearchHeadClusterSpec() returned error: %v", err) } return GetSearchHeadStatefulSet(&cr) } @@ -80,19 +80,19 @@ func TestGetSearchHeadStatefulSet(t *testing.T) { } cr.Spec.Replicas = 3 - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":
19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance"
:"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk",
"ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-sta
ck1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.Replicas = 4 - 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":4,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-
head-2.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-3.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubern
etes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":4,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-s
ecrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-3.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/na
me":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.Replicas = 5 - cr.Spec.IndexerRef.Name = "stack1" - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":5,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splun
kweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-3.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-4.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack1-cluster-master-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup"
:41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + cr.Spec.IndexerClusterRef.Name = "stack1" + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":5,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-
head-2.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-3.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-4.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack1-cluster-master-service"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-
var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.Replicas = 6 cr.Spec.SparkRef.Name = cr.GetIdentifier() - cr.Spec.IndexerRef.Namespace = "test2" - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":6,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}},{"name":"mnt-splunk-jdk","emptyDir":{}},{"name":"mnt-splunk-spark","emptyDir":{}}],"initContainers":[{"name":"init","image":"splunk/spark","command":["bash","-c","cp -r /opt/jdk /mnt \u0026\u0026 cp -r /opt/spark 
/mnt"],"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"250m","memory":"128Mi"}},"volumeMounts":[{"name":"mnt-splunk-jdk","mountPath":"/mnt/jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/spark"}],"imagePullPolicy":"IfNotPresent"}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-3.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-4.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-5.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack1-cluster-master-service.test2.svc.cluster.local"},{"name":"SPLUNK_ENABLE_DFS","value":"true"},{"name":"SPARK_MASTER_HOST","value":"splunk-stack1-spark-master-service"},{"name":"SPARK_MASTER_WEBUI_PORT","value":"8009"},{"name":"SPARK_HOME","value":"/mnt/splunk
-spark"},{"name":"JAVA_HOME","value":"/mnt/splunk-jdk"},{"name":"SPLUNK_DFW_NUM_SLOTS_ENABLED","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-jdk","mountPath":"/mnt/splunk-jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/splunk-spark"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"
spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + cr.Spec.IndexerClusterRef.Namespace = "test2" + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-search-head","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":6,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}},{"name":"mnt-splunk-jdk","emptyDir":{}},{"name":"mnt-splunk-spark","emptyDir":{}}],"initContainers":[{"name":"init","image":"splunk/spark","command":["bash","-c","cp -r /opt/jdk /mnt \u0026\u0026 cp -r /opt/spark 
/mnt"],"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"250m","memory":"128Mi"}},"volumeMounts":[{"name":"mnt-splunk-jdk","mountPath":"/mnt/jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/spark"}],"imagePullPolicy":"IfNotPresent"}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_search_head"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-3.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-4.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-5.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_DEPLOYER_URL","value":"splunk-stack1-deployer-service"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack1-cluster-master-service.test2.svc.cluster.local"},{"name":"SPLUNK_ENABLE_DFS","value":"true"},{"name":"SPARK_MASTER_HOST","value":"splunk-stack1-spark-master-service"},{"name":"SPARK_MASTER_WEBUI_PORT","value":"8009"},{"name":"SPARK_HOME","value":"/mnt/splunk
-spark"},{"name":"JAVA_HOME","value":"/mnt/splunk-jdk"},{"name":"SPLUNK_DFW_NUM_SLOTS_ENABLED","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-jdk","mountPath":"/mnt/splunk-jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/splunk-spark"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-search-head"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-search-head","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"search-head","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},
"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-search-head-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } func TestGetStandaloneStatefulSet(t *testing.T) { @@ -113,12 +113,12 @@ func TestGetStandaloneStatefulSet(t *testing.T) { configTester(t, "GetStandaloneStatefulSet()", f, want) } - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP
"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalon
e","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_UR
L","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10
0Gi"}}},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.SparkRef.Name = cr.GetIdentifier() - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secrets"}},{"name":"mnt-splunk-jdk","emptyDir":{}},{"name":"mnt-splunk-spark","emptyDir":{}}],"initContainers":[{"name":"init","image":"splunk/spark","command":["bash","-c","cp -r /opt/jdk /mnt \u0026\u0026 cp -r /opt/spark 
/mnt"],"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"250m","memory":"128Mi"}},"volumeMounts":[{"name":"mnt-splunk-jdk","mountPath":"/mnt/jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/spark"}],"imagePullPolicy":"IfNotPresent"}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_ENABLE_DFS","value":"true"},{"name":"SPARK_MASTER_HOST","value":"splunk-stack1-spark-master-service"},{"name":"SPARK_MASTER_WEBUI_PORT","value":"8009"},{"name":"SPARK_HOME","value":"/mnt/splunk-spark"},{"name":"JAVA_HOME","value":"/mnt/splunk-jdk"},{"name":"SPLUNK_DFW_NUM_SLOTS_ENABLED","value":"false"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-jdk","mountPath":"/mnt/splunk-jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/splunk-spark"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSec
onds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secrets"}},{"name":"mnt-splunk-jdk","emptyDir":{}},{"name":"mnt-splunk-spark","emptyDir":{}}],"initContainers":[{"name":"init","image":"splunk/spark","command":["bash","-c","cp -r /opt/jdk /mnt \u0026\u0026 cp -r /opt/spark 
/mnt"],"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"250m","memory":"128Mi"}},"volumeMounts":[{"name":"mnt-splunk-jdk","mountPath":"/mnt/jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/spark"}],"imagePullPolicy":"IfNotPresent"}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_ENABLE_DFS","value":"true"},{"name":"SPARK_MASTER_HOST","value":"splunk-stack1-spark-master-service"},{"name":"SPARK_MASTER_WEBUI_PORT","value":"8009"},{"name":"SPARK_HOME","value":"/mnt/splunk-spark"},{"name":"JAVA_HOME","value":"/mnt/splunk-jdk"},{"name":"SPLUNK_DFW_NUM_SLOTS_ENABLED","value":"false"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-jdk","mountPath":"/mnt/splunk-jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/splunk-spark"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSec
onds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) - cr.Spec.IndexerRef.Name = "stack2" + cr.Spec.IndexerClusterRef.Name = "stack2" cr.Spec.StorageClassName = "gp2" cr.Spec.SchedulerName = "custom-scheduler" cr.Spec.Defaults = "defaults-string" @@ -126,7 +126,7 @@ func TestGetStandaloneStatefulSet(t *testing.T) { cr.Spec.Volumes = []corev1.Volume{ {Name: "defaults"}, } - 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secrets"}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults"}},{"name":"mnt-splunk-jdk","emptyDir":{}},{"name":"mnt-splunk-spark","emptyDir":{}}],"initContainers":[{"name":"init","image":"splunk/spark","command":["bash","-c","cp -r /opt/jdk /mnt \u0026\u0026 cp -r /opt/spark 
/mnt"],"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"250m","memory":"128Mi"}},"volumeMounts":[{"name":"mnt-splunk-jdk","mountPath":"/mnt/jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/spark"}],"imagePullPolicy":"IfNotPresent"}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml,/mnt/defaults/defaults.yml,/mnt/splunk-defaults/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-cluster-master-service"},{"name":"SPLUNK_ENABLE_DFS","value":"true"},{"name":"SPARK_MASTER_HOST","value":"splunk-stack1-spark-master-service"},{"name":"SPARK_MASTER_WEBUI_PORT","value":"8009"},{"name":"SPARK_HOME","value":"/mnt/splunk-spark"},{"name":"JAVA_HOME","value":"/mnt/splunk-jdk"},{"name":"SPLUNK_DFW_NUM_SLOTS_ENABLED","value":"false"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"},{"name":"mnt-splunk-jdk","mountPath":"/mnt/splunk-jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/splunk-spark"}],"livenessProbe
":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-standalone","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"defaults"},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-standalone-secrets"}},{"name":"mnt-splunk-defaults","configMap":{"name":"splunk-stack1-standalone-defaults"}},{"name":"mnt-splunk-jdk","emptyDir":{}},{"name":"mnt-splunk-spark","emptyDir":{}}],"initContainers":[{"name":"init","image":"splunk/spark","command":["bash","-c","cp -r /opt/jdk /mnt \u0026\u0026 cp -r /opt/spark 
/mnt"],"resources":{"limits":{"cpu":"1","memory":"512Mi"},"requests":{"cpu":"250m","memory":"128Mi"}},"volumeMounts":[{"name":"mnt-splunk-jdk","mountPath":"/mnt/jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/spark"}],"imagePullPolicy":"IfNotPresent"}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"hec","containerPort":8088,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"},{"name":"dfsmaster","containerPort":9000,"protocol":"TCP"},{"name":"s2s","containerPort":9997,"protocol":"TCP"},{"name":"dfccontrol","containerPort":17000,"protocol":"TCP"},{"name":"datarecieve","containerPort":19000,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml,/mnt/defaults/defaults.yml,/mnt/splunk-defaults/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"splunk-stack2-cluster-master-service"},{"name":"SPLUNK_ENABLE_DFS","value":"true"},{"name":"SPARK_MASTER_HOST","value":"splunk-stack1-spark-master-service"},{"name":"SPARK_MASTER_WEBUI_PORT","value":"8009"},{"name":"SPARK_HOME","value":"/mnt/splunk-spark"},{"name":"JAVA_HOME","value":"/mnt/splunk-jdk"},{"name":"SPLUNK_DFW_NUM_SLOTS_ENABLED","value":"false"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"defaults","mountPath":"/mnt/defaults"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-splunk-defaults","mountPath":"/mnt/splunk-defaults"},{"name":"mnt-splunk-jdk","mountPath":"/mnt/splunk-jdk"},{"name":"mnt-splunk-spark","mountPath":"/mnt/splunk-spark"}],"livenessProbe
":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-standalone"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"custom-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"standalone","app.kubernetes.io/instance":"splunk-stack1-standalone","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"standalone","app.kubernetes.io/part-of":"splunk-stack1-standalone"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"splunk-stack1-standalone-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } func TestGetLicenseMasterStatefulSet(t *testing.T) { @@ -147,14 +147,14 @@ func TestGetLicenseMasterStatefulSet(t *testing.T) { configTester(t, "GetLicenseMasterStatefulSet()", f, want) } - 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"peri
odSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"peri
odSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.LicenseURL = "/mnt/splunk.lic" - 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.s
h"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.s
h"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } func TestGetClusterMasterStatefulSet(t *testing.T) { - cr := enterprisev1.Indexer{ + cr := enterprisev1.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -163,8 +163,8 @@ func TestGetClusterMasterStatefulSet(t *testing.T) { test := 
func(want string) { f := func() (interface{}, error) { - if err := ValidateIndexerSpec(&cr.Spec); err != nil { - t.Errorf("ValidateSearchHeadSpec() returned error: %v", err) + if err := ValidateIndexerClusterSpec(&cr.Spec); err != nil { + t.Errorf("ValidateSearchHeadClusterSpec() returned error: %v", err) } return GetClusterMasterStatefulSet(&cr) } @@ -172,21 +172,21 @@ func TestGetClusterMasterStatefulSet(t *testing.T) { } cr.Spec.Replicas = 1 - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","
value":"splunk_cluster_master"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200
Gi"}}},"status":{}}],"serviceName":"splunk-stack1-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_cluster_master"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/o
pt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.Replicas = 2 cr.Spec.LicenseMasterRef.Name = "stack1" cr.Spec.LicenseMasterRef.Namespace = "test" - 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_cluster_master"},{"name":"SPLUNK_LICENSE_MASTER_URL","value":"splunk-stack1-license-master-service.test.svc.cluster.local"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local,splunk-stack1-indexer-1.splunk-stack1-indexer-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name
":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_cluster_master"},{"name":"SPLUNK_LICENSE_MASTER_URL","value":"splunk-stack1-license-master-service.test.svc.cluster.local"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local,splunk-stack1-indexer-1.splunk-stack1-indexer-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name
":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) cr.Spec.Replicas = 3 cr.Spec.LicenseMasterRef.Name = "" cr.Spec.LicenseURL = 
"/mnt/splunk.lic" - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_cluster_master"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local,splunk-stack1-indexer-1.splunk-stack1-indexer-headless.test.svc.cluster.local,splunk-stack1-indexer-2.splunk-stack1-indexer-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"
100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-indexer-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_cluster_master"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"},{"name":"SPLUNK_INDEXER_URL","value":"splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local,splunk-stack1-indexer-1.splunk-stack1-indexer-headless.test.svc.cluster.local,splunk-stack1-indexer-2.splunk-stack1-indexer-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512M
i"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-stack1-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-stack1-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } func TestGetDeployerStatefulSet(t *testing.T) { - cr := 
enterprisev1.SearchHead{ + cr := enterprisev1.SearchHeadCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -195,8 +195,8 @@ func TestGetDeployerStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := ValidateSearchHeadSpec(&cr.Spec); err != nil { - t.Errorf("ValidateSearchHeadSpec() returned error: %v", err) + if err := ValidateSearchHeadClusterSpec(&cr.Spec); err != nil { + t.Errorf("ValidateSearchHeadClusterSpec() returned error: %v", err) } return GetDeployerStatefulSet(&cr) } @@ -204,11 +204,11 @@ func TestGetDeployerStatefulSet(t *testing.T) { } cr.Spec.Replicas = 3 - test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-deployer","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_STAR
T_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_deployer"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-deployer"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec"
:{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"1Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"200Gi"}}},"status":{}}],"serviceName":"splunk-stack1-deployer-headless","podManagementPolicy":"Parallel","updateStrategy":{}},"status":{"replicas":0}}`) + test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-deployer","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997,7777,9000,17000,17500,19000","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-search-head-secrets"}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS"
,"value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_deployer"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-deployer"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"acce
ssModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-deployer-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } func TestGetSplunkService(t *testing.T) { - cr := enterprisev1.Indexer{ + cr := enterprisev1.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -235,12 +235,12 @@ func TestGetSplunkService(t *testing.T) { } func TestGetSplunkDefaults(t *testing.T) { - cr := enterprisev1.Indexer{ + cr := enterprisev1.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.IndexerSpec{ + Spec: enterprisev1.IndexerClusterSpec{ CommonSplunkSpec: enterprisev1.CommonSplunkSpec{Defaults: "defaults_string"}, }, } @@ -256,7 +256,7 @@ func TestGetSplunkDefaults(t *testing.T) { } func TestGetSplunkSecrets(t *testing.T) { - cr := enterprisev1.Indexer{ + cr := enterprisev1.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", diff --git a/pkg/splunk/enterprise/names.go b/pkg/splunk/enterprise/names.go index 4e809ea8d..978107375 100644 --- a/pkg/splunk/enterprise/names.go +++ b/pkg/splunk/enterprise/names.go @@ -62,7 +62,7 @@ func GetSplunkStatefulsetName(instanceType InstanceType, identifier string) stri } // GetSplunkStatefulsetPodName uses a template to name a specific pod within a Kubernetes StatefulSet for Splunk instances. 
-func GetSplunkStatefulsetPodName(instanceType InstanceType, identifier string, index int) string { +func GetSplunkStatefulsetPodName(instanceType InstanceType, identifier string, index int32) string { return fmt.Sprintf(statefulSetPodTemplateStr, identifier, instanceType, index) } @@ -90,16 +90,16 @@ func GetSplunkDefaultsName(identifier string, instanceType InstanceType) string } // GetSplunkStatefulsetUrls returns a list of fully qualified domain names for all pods within a Splunk StatefulSet. -func GetSplunkStatefulsetUrls(namespace string, instanceType InstanceType, identifier string, replicas int, hostnameOnly bool) string { +func GetSplunkStatefulsetUrls(namespace string, instanceType InstanceType, identifier string, replicas int32, hostnameOnly bool) string { urls := make([]string, replicas) - for i := 0; i < replicas; i++ { + for i := int32(0); i < replicas; i++ { urls[i] = GetSplunkStatefulsetURL(namespace, instanceType, identifier, i, hostnameOnly) } return strings.Join(urls, ",") } // GetSplunkStatefulsetURL returns a fully qualified domain name for a specific pod within a Kubernetes StatefulSet Splunk instances. 
-func GetSplunkStatefulsetURL(namespace string, instanceType InstanceType, identifier string, index int, hostnameOnly bool) string { +func GetSplunkStatefulsetURL(namespace string, instanceType InstanceType, identifier string, index int32, hostnameOnly bool) string { podName := GetSplunkStatefulsetPodName(instanceType, identifier, index) if hostnameOnly { @@ -121,7 +121,7 @@ func GetSplunkImage(specImage string) string { if specImage != "" { name = specImage } else { - name = os.Getenv("SPLUNK_IMAGE") + name = os.Getenv("RELATED_IMAGE_SPLUNK_ENTERPRISE") if name == "" { name = defaultSplunkImage } diff --git a/pkg/splunk/enterprise/names_test.go b/pkg/splunk/enterprise/names_test.go index 6614d84ce..f816a7d5f 100644 --- a/pkg/splunk/enterprise/names_test.go +++ b/pkg/splunk/enterprise/names_test.go @@ -73,7 +73,7 @@ func TestGetSplunkDefaultsName(t *testing.T) { } func TestGetSplunkStatefulsetUrls(t *testing.T) { - test := func(want string, namespace string, instanceType InstanceType, identifier string, replicas int, hostnameOnly bool) { + test := func(want string, namespace string, instanceType InstanceType, identifier string, replicas int32, hostnameOnly bool) { got := GetSplunkStatefulsetUrls(namespace, instanceType, identifier, replicas, hostnameOnly) if got != want { t.Errorf("GetSplunkStatefulsetUrls(\"%s\",\"%s\",\"%s\",%d,%t) = %s; want %s", @@ -99,7 +99,7 @@ func TestGetSplunkImage(t *testing.T) { test("splunk/splunk") - os.Setenv("SPLUNK_IMAGE", "splunk-test/splunk") + os.Setenv("RELATED_IMAGE_SPLUNK_ENTERPRISE", "splunk-test/splunk") test("splunk-test/splunk") specImage = "splunk/splunk-test" diff --git a/pkg/splunk/reconcile/config.go b/pkg/splunk/reconcile/config.go index 2e598df28..3fcb75dc7 100644 --- a/pkg/splunk/reconcile/config.go +++ b/pkg/splunk/reconcile/config.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "context" @@ -27,17 +27,17 @@ import ( "github.com/splunk/splunk-operator/pkg/splunk/resources" ) -// ReconcileSplunkConfig reconciles the state of Kubernetes Secrets, ConfigMaps and other general settings for Splunk Enterprise instances. -func ReconcileSplunkConfig(client ControllerClient, cr enterprisev1.MetaObject, spec enterprisev1.CommonSplunkSpec, instanceType enterprise.InstanceType) error { +// ApplySplunkConfig reconciles the state of Kubernetes Secrets, ConfigMaps and other general settings for Splunk Enterprise instances. +func ApplySplunkConfig(client ControllerClient, cr enterprisev1.MetaObject, spec enterprisev1.CommonSplunkSpec, instanceType enterprise.InstanceType) (*corev1.Secret, error) { var err error // if reference to indexer cluster, extract and re-use idxc.secret // IndexerRef is not relevant for Indexer, and Indexer will use value from LicenseMaster to prevent cyclical dependency var idxcSecret []byte - if instanceType.ToKind() != "indexer" && instanceType.ToKind() != "license-master" && spec.IndexerRef.Name != "" { - idxcSecret, err = GetSplunkSecret(client, cr, spec.IndexerRef, enterprise.SplunkIndexer, "idxc_secret") + if instanceType.ToKind() != "indexer" && instanceType.ToKind() != "license-master" && spec.IndexerClusterRef.Name != "" { + idxcSecret, err = GetSplunkSecret(client, cr, spec.IndexerClusterRef, enterprise.SplunkIndexer, "idxc_secret") if err != nil { - return err + return nil, err } } @@ -46,22 +46,22 @@ func ReconcileSplunkConfig(client ControllerClient, cr enterprisev1.MetaObject, if instanceType.ToKind() != "license-master" && spec.LicenseMasterRef.Name != "" { pass4SymmKey, err = GetSplunkSecret(client, cr, spec.LicenseMasterRef, enterprise.SplunkLicenseMaster, "pass4SymmKey") if err != nil { - return err + return nil, err } if instanceType.ToKind() == "indexer" { // get pass4SymmKey from LicenseMaster to avoid cyclical dependency idxcSecret, err = GetSplunkSecret(client, cr, 
spec.LicenseMasterRef, enterprise.SplunkLicenseMaster, "idxc_secret") if err != nil { - return err + return nil, err } } } - // create splunk secrets + // create or retrieve splunk secrets secrets := enterprise.GetSplunkSecrets(cr, instanceType, idxcSecret, pass4SymmKey) secrets.SetOwnerReferences(append(secrets.GetOwnerReferences(), resources.AsOwner(cr))) - if err = ApplySecret(client, secrets); err != nil { - return err + if secrets, err = ApplySecret(client, secrets); err != nil { + return nil, err } // create splunk defaults (for inline config) @@ -69,11 +69,11 @@ func ReconcileSplunkConfig(client ControllerClient, cr enterprisev1.MetaObject, defaultsMap := enterprise.GetSplunkDefaults(cr.GetIdentifier(), cr.GetNamespace(), instanceType, spec.Defaults) defaultsMap.SetOwnerReferences(append(defaultsMap.GetOwnerReferences(), resources.AsOwner(cr))) if err = ApplyConfigMap(client, defaultsMap); err != nil { - return err + return nil, err } } - return nil + return secrets, nil } // ApplyConfigMap creates or updates a Kubernetes ConfigMap @@ -101,14 +101,15 @@ func ApplyConfigMap(client ControllerClient, configMap *corev1.ConfigMap) error return err } -// ApplySecret creates or updates a Kubernetes Secret -func ApplySecret(client ControllerClient, secret *corev1.Secret) error { +// ApplySecret creates or updates a Kubernetes Secret, and returns active secrets if successful +func ApplySecret(client ControllerClient, secret *corev1.Secret) (*corev1.Secret, error) { scopedLog := log.WithName("ApplySecret").WithValues( "name", secret.GetObjectMeta().GetName(), "namespace", secret.GetObjectMeta().GetNamespace()) namespacedName := types.NamespacedName{Namespace: secret.GetNamespace(), Name: secret.GetName()} var current corev1.Secret + result := ¤t err := client.Get(context.TODO(), namespacedName, ¤t) if err == nil { @@ -116,9 +117,10 @@ func ApplySecret(client ControllerClient, secret *corev1.Secret) error { scopedLog.Info("Found existing Secret") } else { err = 
CreateResource(client, secret) + result = secret } - return err + return result, err } // GetSplunkSecret is used to retrieve a secret from another custom resource. diff --git a/pkg/splunk/reconcile/config_test.go b/pkg/splunk/reconcile/config_test.go index 6b804726d..154010450 100644 --- a/pkg/splunk/reconcile/config_test.go +++ b/pkg/splunk/reconcile/config_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "testing" @@ -23,14 +23,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func TestReconcileSplunkConfig(t *testing.T) { +func TestApplySplunkConfig(t *testing.T) { funcCalls := []mockFuncCall{ {metaName: "*v1.Secret-test-splunk-stack1-search-head-secrets"}, {metaName: "*v1.ConfigMap-test-splunk-stack1-search-head-defaults"}, } createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} updateCalls := map[string][]mockFuncCall{"Get": funcCalls} - searchHeadCR := enterprisev1.SearchHead{ + searchHeadCR := enterprisev1.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ Kind: "SearcHead", }, @@ -43,10 +43,11 @@ func TestReconcileSplunkConfig(t *testing.T) { searchHeadRevised := searchHeadCR.DeepCopy() searchHeadRevised.Spec.Image = "splunk/test" reconcile := func(c *mockClient, cr interface{}) error { - obj := cr.(*enterprisev1.SearchHead) - return ReconcileSplunkConfig(c, obj, obj.Spec.CommonSplunkSpec, enterprise.SplunkSearchHead) + obj := cr.(*enterprisev1.SearchHeadCluster) + _, err := ApplySplunkConfig(c, obj, obj.Spec.CommonSplunkSpec, enterprise.SplunkSearchHead) + return err } - reconcileTester(t, "TestReconcileSplunkConfig", &searchHeadCR, searchHeadRevised, createCalls, updateCalls, reconcile) + reconcileTester(t, "TestApplySplunkConfig", &searchHeadCR, searchHeadRevised, createCalls, updateCalls, reconcile) // test search head with indexer reference secret := corev1.Secret{ @@ -58,18 +59,18 @@ func 
TestReconcileSplunkConfig(t *testing.T) { "idxc_secret": []byte{'a', 'b'}, }, } - searchHeadRevised.Spec.IndexerRef.Name = "stack2" + searchHeadRevised.Spec.IndexerClusterRef.Name = "stack2" updateCalls["Get"] = []mockFuncCall{ {metaName: "*v1.Secret-test-splunk-stack2-indexer-secrets"}, {metaName: "*v1.Secret-test-splunk-stack1-search-head-secrets"}, {metaName: "*v1.ConfigMap-test-splunk-stack1-search-head-defaults"}, } - reconcileTester(t, "TestReconcileSplunkConfig", &searchHeadCR, searchHeadRevised, createCalls, updateCalls, reconcile, &secret) + reconcileTester(t, "TestApplySplunkConfig", &searchHeadCR, searchHeadRevised, createCalls, updateCalls, reconcile, &secret) // test indexer with license master - indexerCR := enterprisev1.Indexer{ + indexerCR := enterprisev1.IndexerCluster{ TypeMeta: metav1.TypeMeta{ - Kind: "Indexer", + Kind: "IndexerCluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "stack1", @@ -90,8 +91,9 @@ func TestReconcileSplunkConfig(t *testing.T) { indexerRevised.Spec.Image = "splunk/test" indexerRevised.Spec.LicenseMasterRef.Name = "stack2" reconcile = func(c *mockClient, cr interface{}) error { - obj := cr.(*enterprisev1.Indexer) - return ReconcileSplunkConfig(c, obj, obj.Spec.CommonSplunkSpec, enterprise.SplunkIndexer) + obj := cr.(*enterprisev1.IndexerCluster) + _, err := ApplySplunkConfig(c, obj, obj.Spec.CommonSplunkSpec, enterprise.SplunkIndexer) + return err } funcCalls = []mockFuncCall{ {metaName: "*v1.Secret-test-splunk-stack2-license-master-secrets"}, @@ -100,7 +102,7 @@ func TestReconcileSplunkConfig(t *testing.T) { } createCalls = map[string][]mockFuncCall{"Get": {funcCalls[2]}, "Create": {funcCalls[2]}} updateCalls = map[string][]mockFuncCall{"Get": funcCalls} - reconcileTester(t, "TestReconcileSplunkConfig", &indexerCR, indexerRevised, createCalls, updateCalls, reconcile, &secret) + reconcileTester(t, "TestApplySplunkConfig", &indexerCR, indexerRevised, createCalls, updateCalls, reconcile, &secret) } func TestApplyConfigMap(t 
*testing.T) { @@ -134,7 +136,8 @@ func TestApplySecret(t *testing.T) { revised := current.DeepCopy() revised.Data = map[string][]byte{"a": []byte{'1', '2'}} reconcile := func(c *mockClient, cr interface{}) error { - return ApplySecret(c, cr.(*corev1.Secret)) + _, err := ApplySecret(c, cr.(*corev1.Secret)) + return err } reconcileTester(t, "TestApplySecret", ¤t, revised, createCalls, updateCalls, reconcile) } diff --git a/pkg/splunk/reconcile/deployment.go b/pkg/splunk/reconcile/deployment.go index b3667a42e..0569d9e1a 100644 --- a/pkg/splunk/reconcile/deployment.go +++ b/pkg/splunk/reconcile/deployment.go @@ -12,63 +12,72 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "context" + "fmt" + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/types" ) // ApplyDeployment creates or updates a Kubernetes Deployment -func ApplyDeployment(client ControllerClient, deployment *appsv1.Deployment) error { +func ApplyDeployment(c ControllerClient, revised *appsv1.Deployment) (enterprisev1.ResourcePhase, error) { scopedLog := log.WithName("ApplyDeployment").WithValues( - "name", deployment.GetObjectMeta().GetName(), - "namespace", deployment.GetObjectMeta().GetNamespace()) + "name", revised.GetObjectMeta().GetName(), + "namespace", revised.GetObjectMeta().GetNamespace()) - namespacedName := types.NamespacedName{Namespace: deployment.GetNamespace(), Name: deployment.GetName()} + namespacedName := types.NamespacedName{Namespace: revised.GetNamespace(), Name: revised.GetName()} var current appsv1.Deployment - err := client.Get(context.TODO(), namespacedName, ¤t) - if err == nil { - // found existing Deployment - if MergeDeploymentUpdates(¤t, deployment) { - // only update if there are material differences, as determined by comparison function - err = UpdateResource(client, ¤t) - } else { - 
scopedLog.Info("No changes for Deployment") - } - } else { - err = CreateResource(client, deployment) + err := c.Get(context.TODO(), namespacedName, ¤t) + if err != nil { + return enterprisev1.PhasePending, CreateResource(c, revised) } - return err -} + // found an existing Deployment -// MergeDeploymentUpdates looks for material differences between a -// Deployment's current config and a revised config. It merges material -// changes from revised to current. This enables us to minimize updates. -// It returns true if there are material differences between them, or false otherwise. -func MergeDeploymentUpdates(current *appsv1.Deployment, revised *appsv1.Deployment) bool { - scopedLog := log.WithName("MergeDeploymentUpdates").WithValues( - "name", current.GetObjectMeta().GetName(), - "namespace", current.GetObjectMeta().GetNamespace()) - result := false + // check for changes in Pod template + hasUpdates := MergePodUpdates(¤t.Spec.Template, &revised.Spec.Template, current.GetObjectMeta().GetName()) + desiredReplicas := *revised.Spec.Replicas + *revised = current // caller expects that object passed represents latest state - // check for change in Replicas count - if current.Spec.Replicas != nil && revised.Spec.Replicas != nil && *current.Spec.Replicas != *revised.Spec.Replicas { - scopedLog.Info("Deployment Replicas differ", - "current", *current.Spec.Replicas, - "revised", *revised.Spec.Replicas) - current.Spec.Replicas = revised.Spec.Replicas - result = true + // check for scaling + if revised.Spec.Replicas != nil { + if *revised.Spec.Replicas < desiredReplicas { + scopedLog.Info(fmt.Sprintf("Scaling replicas up to %d", desiredReplicas)) + *revised.Spec.Replicas = desiredReplicas + return enterprisev1.PhaseScalingUp, UpdateResource(c, revised) + } else if *revised.Spec.Replicas > desiredReplicas { + scopedLog.Info(fmt.Sprintf("Scaling replicas down to %d", desiredReplicas)) + *revised.Spec.Replicas = desiredReplicas + return enterprisev1.PhaseScalingDown, 
UpdateResource(c, revised) + } } - // check for changes in Pod template - if MergePodUpdates(¤t.Spec.Template, &revised.Spec.Template, current.GetObjectMeta().GetName()) { - result = true + // only update if there are material differences, as determined by comparison function + if hasUpdates { + return enterprisev1.PhaseUpdating, UpdateResource(c, revised) + } + + // check if updates are in progress + if revised.Status.UpdatedReplicas < revised.Status.Replicas { + scopedLog.Info("Waiting for updates to complete") + return enterprisev1.PhaseUpdating, nil + } + + // check if replicas are not yet ready + if revised.Status.ReadyReplicas < desiredReplicas { + scopedLog.Info("Waiting for pods to become ready") + if revised.Status.ReadyReplicas > 0 { + return enterprisev1.PhaseScalingUp, nil + } + return enterprisev1.PhasePending, nil } - return result + // all is good! + scopedLog.Info("All pods are ready") + return enterprisev1.PhaseReady, nil } diff --git a/pkg/splunk/reconcile/deployment_test.go b/pkg/splunk/reconcile/deployment_test.go index 93febbb0b..eec86a39d 100644 --- a/pkg/splunk/reconcile/deployment_test.go +++ b/pkg/splunk/reconcile/deployment_test.go @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "testing" + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -34,11 +35,86 @@ func TestApplyDeployment(t *testing.T) { Spec: appsv1.DeploymentSpec{ Replicas: &replicas, }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + UpdatedReplicas: 1, + }, + } + wantPhases := []enterprisev1.ResourcePhase{ + enterprisev1.PhasePending, + enterprisev1.PhaseReady, + enterprisev1.PhaseUpdating, } + wantPhaseNum := 0 + reconcile := func(c *mockClient, cr interface{}) error { + gotPhase, err := ApplyDeployment(c, cr.(*appsv1.Deployment)) + if gotPhase != wantPhases[wantPhaseNum] { + t.Errorf("TestApplyDeployment() got phase[%d] = %s; want %s", wantPhaseNum, gotPhase, wantPhases[wantPhaseNum]) + } + wantPhaseNum++ + return err + } + + // test update revised := current.DeepCopy() + revised.Spec.Template.ObjectMeta.Labels = map[string]string{"one": "two"} + //reconcileTester(t, "TestApplyDeployment", &current, revised, createCalls, updateCalls, reconcile) + + // test scale up + revised = current.DeepCopy() *revised.Spec.Replicas = 3 - reconcile := func(c *mockClient, cr interface{}) error { - return ApplyDeployment(c, cr.(*appsv1.Deployment)) + wantPhases = []enterprisev1.ResourcePhase{ + enterprisev1.PhasePending, + enterprisev1.PhaseReady, + enterprisev1.PhaseScalingUp, } + wantPhaseNum = 0 reconcileTester(t, "TestApplyDeployment", &current, revised, createCalls, updateCalls, reconcile) + + // test scale down + *current.Spec.Replicas = 5 + current.Status.Replicas = 5 + current.Status.ReadyReplicas = 5 + current.Status.UpdatedReplicas = 5 + revised = current.DeepCopy() + *revised.Spec.Replicas = 3 + wantPhases = []enterprisev1.ResourcePhase{ + enterprisev1.PhasePending, + enterprisev1.PhaseReady, + enterprisev1.PhaseScalingDown, + } + wantPhaseNum = 0 + reconcileTester(t, "TestApplyDeployment", &current, revised, createCalls, 
updateCalls, reconcile) + + // check for no updates, except pending pod updates (in progress) + c := newMockClient() + c.state[getStateKey(&current)] = &current + current.Status.Replicas = 5 + current.Status.ReadyReplicas = 5 + current.Status.UpdatedReplicas = 3 + wantPhase := enterprisev1.PhaseUpdating + gotPhase, err := ApplyDeployment(c, &current) + if gotPhase != wantPhase { + t.Errorf("TestApplyDeployment() got phase = %s; want %s", gotPhase, wantPhase) + } + if err != nil { + t.Errorf("TestApplyDeployment() returned error = %v; want nil", err) + } + + // check for no updates, except waiting for pods to become ready + c = newMockClient() + c.state[getStateKey(&current)] = &current + *current.Spec.Replicas = 5 + current.Status.Replicas = 5 + current.Status.ReadyReplicas = 3 + current.Status.UpdatedReplicas = 5 + wantPhase = enterprisev1.PhaseScalingUp + gotPhase, err = ApplyDeployment(c, &current) + if gotPhase != wantPhase { + t.Errorf("TestApplyDeployment() got phase = %s; want %s", gotPhase, wantPhase) + } + if err != nil { + t.Errorf("TestApplyDeployment() returned error = %v; want nil", err) + } } diff --git a/pkg/splunk/reconcile/doc.go b/pkg/splunk/reconcile/doc.go index f1a89b599..db7d4b988 100644 --- a/pkg/splunk/reconcile/doc.go +++ b/pkg/splunk/reconcile/doc.go @@ -13,8 +13,8 @@ // limitations under the License. /* -Package deploy is used to manipulate Kubernetes resources using its REST API. +Package reconcile is used to manipulate Kubernetes resources using its REST API. Methods within this package are likely to change state and/or mutate data. -This package has dependencies on enterprise, spark and resources. +This package has dependencies on the enterprise, spark and resource modules. 
*/ -package deploy +package reconcile diff --git a/pkg/splunk/reconcile/finalizers.go b/pkg/splunk/reconcile/finalizers.go index 0aeb7f47a..c3961714e 100644 --- a/pkg/splunk/reconcile/finalizers.go +++ b/pkg/splunk/reconcile/finalizers.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "context" @@ -81,9 +81,9 @@ func DeleteSplunkPvc(cr enterprisev1.MetaObject, c ControllerClient) error { component = "standalone" case "LicenseMaster": component = "license-master" - case "SearchHead": + case "SearchHeadCluster": component = "search-head" - case "Indexer": + case "IndexerCluster": component = "indexer" default: scopedLog.Info("Skipping PVC removal") diff --git a/pkg/splunk/reconcile/finalizers_test.go b/pkg/splunk/reconcile/finalizers_test.go index 06fa03834..969ef1551 100644 --- a/pkg/splunk/reconcile/finalizers_test.go +++ b/pkg/splunk/reconcile/finalizers_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "fmt" @@ -33,9 +33,9 @@ func splunkDeletionTester(t *testing.T, cr enterprisev1.MetaObject, delete func( component = "standalone" case "LicenseMaster": component = "license-master" - case "SearchHead": + case "SearchHeadCluster": component = "search-head" - case "Indexer": + case "IndexerCluster": component = "indexer" } @@ -84,9 +84,9 @@ func splunkDeletionTester(t *testing.T, cr enterprisev1.MetaObject, delete func( } func TestCheckSplunkDeletion(t *testing.T) { - cr := enterprisev1.Indexer{ + cr := enterprisev1.IndexerCluster{ TypeMeta: metav1.TypeMeta{ - Kind: "Indexer", + Kind: "IndexerCluster", }, ObjectMeta: metav1.ObjectMeta{ Name: "stack1", diff --git a/pkg/splunk/reconcile/indexer.go b/pkg/splunk/reconcile/indexer.go deleted file mode 100644 index c35a6e2cc..000000000 --- a/pkg/splunk/reconcile/indexer.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package deploy - -import ( - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" - "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// ReconcileIndexer reconciles the state of a Splunk Enterprise indexer cluster. 
-func ReconcileIndexer(client ControllerClient, cr *enterprisev1.Indexer) error { - - // validate and updates defaults for CR - err := enterprise.ValidateIndexerSpec(&cr.Spec) - if err != nil { - return err - } - - // check if deletion has been requested - if cr.ObjectMeta.DeletionTimestamp != nil { - _, err := CheckSplunkDeletion(cr, client) - return err - } - - // create or update general config resources - err = ReconcileSplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkIndexer) - if err != nil { - return err - } - - // create or update a headless service for indexer cluster - err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkIndexer, true)) - if err != nil { - return err - } - - // create or update a regular service for indexer cluster (ingestion) - err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkIndexer, false)) - if err != nil { - return err - } - - // create or update a regular service for the cluster master - err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkClusterMaster, false)) - if err != nil { - return err - } - - // create or update statefulset for the cluster master - statefulSet, err := enterprise.GetClusterMasterStatefulSet(cr) - if err != nil { - return err - } - err = ApplyStatefulSet(client, statefulSet) - if err != nil { - return err - } - - // create or update statefulset for the indexers - statefulSet, err = enterprise.GetIndexerStatefulSet(cr) - if err != nil { - return err - } - return ApplyStatefulSet(client, statefulSet) -} diff --git a/pkg/splunk/reconcile/indexer_test.go b/pkg/splunk/reconcile/indexer_test.go deleted file mode 100644 index a7e6efe45..000000000 --- a/pkg/splunk/reconcile/indexer_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package deploy - -import ( - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" -) - -func TestReconcileIndexer(t *testing.T) { - funcCalls := []mockFuncCall{ - {metaName: "*v1.Secret-test-splunk-stack1-indexer-secrets"}, - {metaName: "*v1.Service-test-splunk-stack1-indexer-headless"}, - {metaName: "*v1.Service-test-splunk-stack1-indexer-service"}, - {metaName: "*v1.Service-test-splunk-stack1-cluster-master-service"}, - {metaName: "*v1.StatefulSet-test-splunk-stack1-cluster-master"}, - {metaName: "*v1.StatefulSet-test-splunk-stack1-indexer"}, - } - createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} - updateCalls := map[string][]mockFuncCall{"Get": funcCalls, "Update": []mockFuncCall{funcCalls[4], funcCalls[5]}} - - current := enterprisev1.Indexer{ - TypeMeta: metav1.TypeMeta{ - Kind: "Indexer", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "stack1", - Namespace: "test", - }, - } - revised := current.DeepCopy() - revised.Spec.Image = "splunk/test" - reconcile := func(c *mockClient, cr interface{}) error { - return ReconcileIndexer(c, cr.(*enterprisev1.Indexer)) - } - reconcileTester(t, "TestReconcileIndexer", &current, revised, createCalls, updateCalls, reconcile) - - // test deletion - currentTime := metav1.NewTime(time.Now()) - revised.ObjectMeta.DeletionTimestamp = &currentTime - 
revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} - deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { - err := ReconcileIndexer(c, cr.(*enterprisev1.Indexer)) - return true, err - } - splunkDeletionTester(t, revised, deleteFunc) -} diff --git a/pkg/splunk/reconcile/indexercluster.go b/pkg/splunk/reconcile/indexercluster.go new file mode 100644 index 000000000..a3d84adde --- /dev/null +++ b/pkg/splunk/reconcile/indexercluster.go @@ -0,0 +1,273 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reconcile + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/go-logr/logr" + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + "github.com/splunk/splunk-operator/pkg/splunk/resources" +) + +// ApplyIndexerCluster reconciles the state of a Splunk Enterprise indexer cluster. 
+func ApplyIndexerCluster(client ControllerClient, cr *enterprisev1.IndexerCluster) (reconcile.Result, error) { + + // unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + scopedLog := log.WithName("ApplyIndexerCluster").WithValues("name", cr.GetIdentifier(), "namespace", cr.GetNamespace()) + + // validate and updates defaults for CR + err := enterprise.ValidateIndexerClusterSpec(&cr.Spec) + if err != nil { + return result, err + } + + // updates status after function completes + cr.Status.Phase = enterprisev1.PhaseError + cr.Status.ClusterMasterPhase = enterprisev1.PhaseError + cr.Status.Replicas = cr.Spec.Replicas + cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetIdentifier()) + defer func() { + client.Status().Update(context.TODO(), cr) + }() + + // check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := CheckSplunkDeletion(cr, client) + if terminating && err != nil { // don't bother if no error, since it will just be removed immmediately after + cr.Status.Phase = enterprisev1.PhaseTerminating + cr.Status.ClusterMasterPhase = enterprisev1.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + // create or update general config resources + secrets, err := ApplySplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkIndexer) + if err != nil { + return result, err + } + + // create or update a headless service for indexer cluster + err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkIndexer, true)) + if err != nil { + return result, err + } + + // create or update a regular service for indexer cluster (ingestion) + err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkIndexer, false)) + if err != nil { + return result, err + } + + // create or 
update a regular service for the cluster master + err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkClusterMaster, false)) + if err != nil { + return result, err + } + + // create or update statefulset for the cluster master + statefulSet, err := enterprise.GetClusterMasterStatefulSet(cr) + if err != nil { + return result, err + } + clusterMasterManager := DefaultStatefulSetPodManager{} + phase, err := clusterMasterManager.Update(client, statefulSet, 1) + if err != nil { + return result, err + } + cr.Status.ClusterMasterPhase = phase + + // create or update statefulset for the indexers + statefulSet, err = enterprise.GetIndexerStatefulSet(cr) + if err != nil { + return result, err + } + mgr := IndexerClusterPodManager{log: scopedLog, cr: cr, secrets: secrets, newSplunkClient: splclient.NewSplunkClient} + phase, err = mgr.Update(client, statefulSet, cr.Spec.Replicas) + if err != nil { + return result, err + } + cr.Status.Phase = phase + + // no need to requeue if everything is ready + if cr.Status.Phase == enterprisev1.PhaseReady { + result.Requeue = false + } + return result, nil +} + +// IndexerClusterPodManager is used to manage the pods within a search head cluster +type IndexerClusterPodManager struct { + log logr.Logger + cr *enterprisev1.IndexerCluster + secrets *corev1.Secret + newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient +} + +// Update for IndexerClusterPodManager handles all updates for a statefulset of indexers +func (mgr *IndexerClusterPodManager) Update(c ControllerClient, statefulSet *appsv1.StatefulSet, desiredReplicas int32) (enterprisev1.ResourcePhase, error) { + // update statefulset, if necessary + _, err := ApplyStatefulSet(c, statefulSet) + if err != nil { + return enterprisev1.PhaseError, err + } + + // update CR status with SHC information + err = mgr.updateStatus(statefulSet) + if err != nil || mgr.cr.Status.ReadyReplicas == 0 || 
!mgr.cr.Status.Initialized || !mgr.cr.Status.IndexingReady || !mgr.cr.Status.ServiceReady { + mgr.log.Error(err, "Indexer cluster is not ready") + return enterprisev1.PhasePending, nil + } + + // manage scaling and updates + return UpdateStatefulSetPods(c, statefulSet, mgr, desiredReplicas) +} + +// PrepareScaleDown for IndexerClusterPodManager prepares indexer pod to be removed via scale down event; it returns true when ready +func (mgr *IndexerClusterPodManager) PrepareScaleDown(n int32) (bool, error) { + // first, decommission indexer peer with enforceCounts=true; this will rebalance buckets across other peers + complete, err := mgr.decommission(n, true) + if err != nil { + return false, err + } + if !complete { + return false, nil + } + + // next, remove the peer + c := mgr.getClusterMasterClient() + return true, c.RemoveIndexerClusterPeer(mgr.cr.Status.Peers[n].ID) +} + +// PrepareRecycle for IndexerClusterPodManager prepares indexer pod to be recycled for updates; it returns true when ready +func (mgr *IndexerClusterPodManager) PrepareRecycle(n int32) (bool, error) { + return mgr.decommission(n, false) +} + +// FinishRecycle for IndexerClusterPodManager completes recycle event for indexer pod; it returns true when complete +func (mgr *IndexerClusterPodManager) FinishRecycle(n int32) (bool, error) { + return mgr.cr.Status.Peers[n].Status == "Up", nil +} + +// decommission for IndexerClusterPodManager decommissions an indexer pod; it returns true when ready +func (mgr *IndexerClusterPodManager) decommission(n int32, enforceCounts bool) (bool, error) { + peerName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkIndexer, mgr.cr.GetIdentifier(), n) + + switch mgr.cr.Status.Peers[n].Status { + case "Up": + mgr.log.Info("Decommissioning indexer cluster peer", "peerName", peerName, "enforceCounts", enforceCounts) + c := mgr.getClient(n) + return false, c.DecommissionIndexerClusterPeer(enforceCounts) + + case "Decommissioning": + mgr.log.Info("Waiting for 
decommission to complete", "peerName", peerName) + return false, nil + + case "ReassigningPrimaries": + mgr.log.Info("Waiting for decommission to complete", "peerName", peerName) + return false, nil + + case "GracefulShutdown": + mgr.log.Info("Decommission complete", "peerName", peerName, "Status", mgr.cr.Status.Peers[n].Status) + return true, nil + + case "Down": + mgr.log.Info("Decommission complete", "peerName", peerName, "Status", mgr.cr.Status.Peers[n].Status) + return true, nil + + case "": // this can happen after the peer has been removed from the indexer cluster + mgr.log.Info("Peer has empty ID", "peerName", peerName) + return false, nil + } + + // unhandled status + return false, fmt.Errorf("Status=%s", mgr.cr.Status.Peers[n].Status) +} + +// getClient for IndexerClusterPodManager returns a SplunkClient for the member n +func (mgr *IndexerClusterPodManager) getClient(n int32) *splclient.SplunkClient { + memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkIndexer, mgr.cr.GetIdentifier(), n) + fqdnName := resources.GetServiceFQDN(mgr.cr.GetNamespace(), + fmt.Sprintf("%s.%s", memberName, enterprise.GetSplunkServiceName(enterprise.SplunkIndexer, mgr.cr.GetIdentifier(), true))) + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(mgr.secrets.Data["password"])) +} + +// getClusterMasterClient for IndexerClusterPodManager returns a SplunkClient for cluster master +func (mgr *IndexerClusterPodManager) getClusterMasterClient() *splclient.SplunkClient { + fqdnName := resources.GetServiceFQDN(mgr.cr.GetNamespace(), enterprise.GetSplunkServiceName(enterprise.SplunkClusterMaster, mgr.cr.GetIdentifier(), false)) + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(mgr.secrets.Data["password"])) +} + +// updateStatus for IndexerClusterPodManager uses the REST API to update the status for a SearcHead custom resource +func (mgr *IndexerClusterPodManager) updateStatus(statefulSet 
*appsv1.StatefulSet) error { + mgr.cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas + mgr.cr.Status.Peers = []enterprisev1.IndexerClusterMemberStatus{} + + if mgr.cr.Status.ClusterMasterPhase != enterprisev1.PhaseReady { + mgr.cr.Status.Initialized = false + mgr.cr.Status.IndexingReady = false + mgr.cr.Status.ServiceReady = false + mgr.cr.Status.MaintenanceMode = false + return fmt.Errorf("Waiting for cluster master to become ready") + } + + // get indexer cluster info from cluster master if it's ready + c := mgr.getClusterMasterClient() + clusterInfo, err := c.GetClusterMasterInfo() + if err != nil { + return err + } + mgr.cr.Status.Initialized = clusterInfo.Initialized + mgr.cr.Status.IndexingReady = clusterInfo.IndexingReady + mgr.cr.Status.ServiceReady = clusterInfo.ServiceReady + mgr.cr.Status.MaintenanceMode = clusterInfo.MaintenanceMode + + // get peer information from cluster master + peers, err := c.GetClusterMasterPeers() + if err != nil { + return err + } + for n := int32(0); n < statefulSet.Status.Replicas; n++ { + peerName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkIndexer, mgr.cr.GetIdentifier(), n) + peerStatus := enterprisev1.IndexerClusterMemberStatus{Name: peerName} + peerInfo, ok := peers[peerName] + if ok { + peerStatus.ID = peerInfo.ID + peerStatus.Status = peerInfo.Status + peerStatus.ActiveBundleID = peerInfo.ActiveBundleID + peerStatus.BucketCount = peerInfo.BucketCount + peerStatus.Searchable = peerInfo.Searchable + } else { + mgr.log.Info("Peer is not known by cluster master", "peerName", peerName) + } + mgr.cr.Status.Peers = append(mgr.cr.Status.Peers, peerStatus) + } + + return nil +} diff --git a/pkg/splunk/reconcile/indexercluster_test.go b/pkg/splunk/reconcile/indexercluster_test.go new file mode 100644 index 000000000..1c3f49f08 --- /dev/null +++ b/pkg/splunk/reconcile/indexercluster_test.go @@ -0,0 +1,211 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reconcile + +import ( + "strings" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + spltest "github.com/splunk/splunk-operator/pkg/splunk/test" +) + +func TestApplyIndexerCluster(t *testing.T) { + funcCalls := []mockFuncCall{ + {metaName: "*v1.Secret-test-splunk-stack1-indexer-secrets"}, + {metaName: "*v1.Service-test-splunk-stack1-indexer-headless"}, + {metaName: "*v1.Service-test-splunk-stack1-indexer-service"}, + {metaName: "*v1.Service-test-splunk-stack1-cluster-master-service"}, + {metaName: "*v1.StatefulSet-test-splunk-stack1-cluster-master"}, + {metaName: "*v1.StatefulSet-test-splunk-stack1-indexer"}, + } + createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} + updateCalls := map[string][]mockFuncCall{"Get": funcCalls, "Update": []mockFuncCall{funcCalls[4], funcCalls[5]}} + + current := enterprisev1.IndexerCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IndexerCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + } + revised := current.DeepCopy() + revised.Spec.Image = "splunk/test" + reconcile := func(c *mockClient, cr interface{}) error { + _, err := 
ApplyIndexerCluster(c, cr.(*enterprisev1.IndexerCluster)) + return err + } + reconcileTester(t, "TestApplyIndexerCluster", &current, revised, createCalls, updateCalls, reconcile) + + // test deletion + currentTime := metav1.NewTime(time.Now()) + revised.ObjectMeta.DeletionTimestamp = &currentTime + revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} + deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { + _, err := ApplyIndexerCluster(c, cr.(*enterprisev1.IndexerCluster)) + return true, err + } + splunkDeletionTester(t, revised, deleteFunc) +} + +func indexerClusterPodManagerTester(t *testing.T, method string, mockHandlers []spltest.MockHTTPHandler, + desiredReplicas int32, wantPhase enterprisev1.ResourcePhase, statefulSet *appsv1.StatefulSet, + wantCalls map[string][]mockFuncCall, wantError error, initObjects ...runtime.Object) { + + // test for updating + scopedLog := log.WithName(method) + cr := enterprisev1.IndexerCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "IndexerCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Status: enterprisev1.IndexerClusterStatus{ + ClusterMasterPhase: enterprisev1.PhaseReady, + }, + } + secrets := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1-secrets", + Namespace: "test", + }, + Data: map[string][]byte{ + "password": []byte{'1', '2', '3'}, + }, + } + mockSplunkClient := &spltest.MockHTTPClient{} + mockSplunkClient.AddHandlers(mockHandlers...) + mgr := &IndexerClusterPodManager{ + log: scopedLog, + cr: &cr, + secrets: secrets, + newSplunkClient: func(managementURI, username, password string) *splclient.SplunkClient { + c := splclient.NewSplunkClient(managementURI, username, password) + c.Client = mockSplunkClient + return c + }, + } + podManagerUpdateTester(t, method, mgr, desiredReplicas, wantPhase, statefulSet, wantCalls, wantError, initObjects...) 
+ mockSplunkClient.CheckRequests(t, method) +} + +func TestIndexerClusterPodManager(t *testing.T) { + var replicas int32 = 1 + statefulSet := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + UpdatedReplicas: replicas, + UpdateRevision: "v1", + }, + } + funcCalls := []mockFuncCall{ + {metaName: "*v1.StatefulSet-test-splunk-stack1"}, + {metaName: "*v1.Pod-test-splunk-stack1-0"}, + } + wantCalls := map[string][]mockFuncCall{"Get": {funcCalls[0]}} + + // test 1 ready pod + mockHandlers := []spltest.MockHTTPHandler{ + {"GET", "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/info?count=0&output_mode=json", 200, nil, + `{"links":{},"origin":"https://localhost:8089/services/cluster/master/info","updated":"2020-03-18T01:04:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"master","id":"https://localhost:8089/services/cluster/master/info/master","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/info/master","list":"/services/cluster/master/info/master"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"apply_bundle_status":{"invalid_bundle":{"bundle_path":"","bundle_validation_errors_on_master":[],"checksum":"","timestamp":0},"reload_bundle_issued":false,"status":"None"},"backup_and_restore_primaries":false,"controlled_rolling_restart_flag":false,"eai:acl":null,"indexing_ready
_flag":true,"initialized_flag":true,"label":"splunk-stack1-cluster-master-0","last_check_restart_bundle_result":false,"last_dry_run_bundle":{"bundle_path":"","checksum":"","timestamp":0},"last_validated_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/0af7c0e95f313f7be3b0cb1d878df9a1-1583948640.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","is_valid_bundle":true,"timestamp":1583948640},"latest_bundle":{"bundle_path":"/opt/splunk/var/run/splunk/cluster/remote-bundle/506c58d5aeda1dd6017889e3186e7337-1583870198.bundle","checksum":"14310A4AABD23E85BBD4559C4A3B59F8","timestamp":1583870198},"maintenance_mode":false,"multisite":false,"previous_active_bundle":{"bundle_path":"","checksum":"","timestamp":0},"primaries_backup_status":"No on-going (or) completed primaries backup yet. Check back again in few minutes if you expect a backup.","quiet_period_flag":false,"rolling_restart_flag":false,"rolling_restart_or_upgrade":false,"service_ready_flag":true,"start_time":1583948636,"summary_replication":"false"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`}, + {"GET", "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/peers?count=0&output_mode=json", 200, nil, + 
`{"links":{"create":"/services/cluster/master/peers/_new"},"origin":"https://localhost:8089/services/cluster/master/peers","updated":"2020-03-18T01:08:53+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"D39B1729-E2C5-4273-B9B2-534DA7C2F866","id":"https://localhost:8089/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","list":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866","edit":"/services/cluster/master/peers/D39B1729-E2C5-4273-B9B2-534DA7C2F866"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","apply_bundle_status":{"invalid_bundle":{"bundle_validation_errors":[],"invalid_bundle_id":""},"reasons_for_restart":[],"restart_required_for_apply_bundle":false,"status":"None"},"base_generation_id":26,"bucket_count":73,"bucket_count_by_index":{"_audit":24,"_internal":45,"_telemetry":4},"buckets_rf_by_origin_site":{"default":73},"buckets_sf_by_origin_site":{"default":73},"delayed_buckets_to_discard":[],"eai:acl":null,"fixup_set":[],"heartbeat_started":true,"host_port_pair":"10.36.0.6:8089","indexing_disk_space":210707374080,"is_searchable":true,"is_valid_bundle":true,"label":"splunk-stack1-indexer-0","last_dry_run_bundle":"","last_heartbeat":1584493732,"last_validated_bundle":"14310A4AABD23E85BBD4559C4A3B59F8","latest_bundle_id":"14310A4AABD23E85BBD4559C4A3B59F8","peer_registered_summaries":true,"pending_builds":[],"pending_job_count":0,"primary_count":73,"primary_count_remote":0,"register_search_address":"10.36.0.6:8089","replication_count":0,"replication_port":9887,"replication_use_ssl":false,"restart_required_for_applying_dry_run_bund
le":false,"search_state_counter":{"PendingSearchable":0,"Searchable":73,"SearchablePendingMask":0,"Unsearchable":0},"site":"default","splunk_version":"8.0.2","status":"Up","status_counter":{"Complete":69,"NonStreamingTarget":0,"StreamingSource":4,"StreamingTarget":0},"summary_replication_count":0}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`}, + } + wantCalls = map[string][]mockFuncCall{"Get": funcCalls} + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1-0", + Namespace: "test", + Labels: map[string]string{ + "controller-revision-hash": "v1", + }, + }, + } + method := "IndexerClusterPodManager.Update(All pods ready)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseReady, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => decommission + mockHandlers = append(mockHandlers, + spltest.MockHTTPHandler{"POST", "https://splunk-stack1-indexer-0.splunk-stack1-indexer-headless.test.svc.cluster.local:8089/services/cluster/slave/control/control/decommission?enforce_counts=0", 200, nil, ``}, + ) + pod.ObjectMeta.Labels["controller-revision-hash"] = "v0" + method = "IndexerClusterPodManager.Update(Decommission Pod)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => wait for decommission to complete + mockHandlers = []spltest.MockHTTPHandler{mockHandlers[0], mockHandlers[1]} + mockHandlers[1].Body = strings.Replace(mockHandlers[1].Body, `"status":"Up"`, `"status":"ReassigningPrimaries"`, 1) + method = "IndexerClusterPodManager.Update(ReassigningPrimaries)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => wait for decommission to complete + mockHandlers[1].Body = strings.Replace(mockHandlers[1].Body, `"status":"ReassigningPrimaries"`, 
`"status":"Decommissioning"`, 1) + method = "IndexerClusterPodManager.Update(Decommissioning)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => delete pod + wantCalls = map[string][]mockFuncCall{"Get": funcCalls, "Delete": {funcCalls[1]}} + mockHandlers[1].Body = strings.Replace(mockHandlers[1].Body, `"status":"Decommissioning"`, `"status":"Down"`, 1) + method = "IndexerClusterPodManager.Update(Delete Pod)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test scale down => pod not found + pod.ObjectMeta.Name = "splunk-stack1-2" + replicas = 2 + statefulSet.Status.Replicas = 2 + statefulSet.Status.ReadyReplicas = 2 + statefulSet.Status.UpdatedReplicas = 2 + wantCalls = map[string][]mockFuncCall{"Get": {funcCalls[0]}} + method = "IndexerClusterPodManager.Update(Pod Not Found)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseScalingDown, statefulSet, wantCalls, nil, statefulSet, pod) + + // test scale down => decommission pod + mockHandlers[1].Body = `{"entry":[{"name":"aa45bf46-7f46-47af-a760-590d5c606d10","content":{"status":"Up","label":"splunk-stack1-indexer-0"}},{"name":"D39B1729-E2C5-4273-B9B2-534DA7C2F866","content":{"status":"GracefulShutdown","label":"splunk-stack1-indexer-1"}}]}` + mockHandlers = append(mockHandlers, + spltest.MockHTTPHandler{"POST", "https://splunk-stack1-cluster-master-service.test.svc.cluster.local:8089/services/cluster/master/control/control/remove_peers?peers=D39B1729-E2C5-4273-B9B2-534DA7C2F866", 200, nil, ``}, + ) + pvcCalls := []mockFuncCall{ + {metaName: "*v1.PersistentVolumeClaim-test-pvc-etc-splunk-stack1-1"}, + {metaName: "*v1.PersistentVolumeClaim-test-pvc-var-splunk-stack1-1"}, + } + funcCalls[1] = mockFuncCall{metaName: "*v1.Pod-test-splunk-stack1-0"} + wantCalls = 
map[string][]mockFuncCall{"Get": {funcCalls[0]}, "Delete": pvcCalls, "Update": {funcCalls[0]}} + wantCalls["Get"] = append(wantCalls["Get"], pvcCalls...) + pvcList := []*corev1.PersistentVolumeClaim{ + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-etc-splunk-stack1-1", Namespace: "test"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-var-splunk-stack1-1", Namespace: "test"}}, + } + method = "IndexerClusterPodManager.Update(Decommission)" + indexerClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseScalingDown, statefulSet, wantCalls, nil, statefulSet, pod, pvcList[0], pvcList[1]) +} diff --git a/pkg/splunk/reconcile/licensemaster.go b/pkg/splunk/reconcile/licensemaster.go index 5a21d6d45..8794ab6d9 100644 --- a/pkg/splunk/reconcile/licensemaster.go +++ b/pkg/splunk/reconcile/licensemaster.go @@ -12,44 +12,77 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( + "context" + "time" + + "sigs.k8s.io/controller-runtime/pkg/reconcile" + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// ReconcileLicenseMaster reconciles the state for the Splunk Enterprise license master. -func ReconcileLicenseMaster(client ControllerClient, cr *enterprisev1.LicenseMaster) error { +// ApplyLicenseMaster reconciles the state for the Splunk Enterprise license master. 
+func ApplyLicenseMaster(client ControllerClient, cr *enterprisev1.LicenseMaster) (reconcile.Result, error) { + + // unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } // validate and updates defaults for CR err := enterprise.ValidateLicenseMasterSpec(&cr.Spec) if err != nil { - return err + return result, err } + // updates status after function completes + cr.Status.Phase = enterprisev1.PhaseError + defer func() { + client.Status().Update(context.TODO(), cr) + }() + // check if deletion has been requested if cr.ObjectMeta.DeletionTimestamp != nil { - _, err := CheckSplunkDeletion(cr, client) - return err + terminating, err := CheckSplunkDeletion(cr, client) + if terminating && err != nil { // don't bother if no error, since it will just be removed immediately after + cr.Status.Phase = enterprisev1.PhaseTerminating + } else { + result.Requeue = false + } + return result, err } // create or update general config resources - err = ReconcileSplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkLicenseMaster) + _, err = ApplySplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkLicenseMaster) if err != nil { - return err + return result, err } // create or update a service err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkLicenseMaster, false)) if err != nil { - return err + return result, err } // create or update statefulset statefulSet, err := enterprise.GetLicenseMasterStatefulSet(cr) if err != nil { - return err + return result, err + } + mgr := DefaultStatefulSetPodManager{} + phase, err := mgr.Update(client, statefulSet, 1) + if err != nil { + return result, err + } + cr.Status.Phase = phase + + // no need to requeue if everything is ready + if cr.Status.Phase == enterprisev1.PhaseReady { + result.Requeue = false } - return ApplyStatefulSet(client, statefulSet) + return result, 
nil } diff --git a/pkg/splunk/reconcile/licensemaster_test.go b/pkg/splunk/reconcile/licensemaster_test.go index 2f80b244d..7d5f67fad 100644 --- a/pkg/splunk/reconcile/licensemaster_test.go +++ b/pkg/splunk/reconcile/licensemaster_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "testing" @@ -23,7 +23,7 @@ import ( enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" ) -func TestReconcileLicenseMaster(t *testing.T) { +func TestApplyLicenseMaster(t *testing.T) { funcCalls := []mockFuncCall{ {metaName: "*v1.Secret-test-splunk-stack1-license-master-secrets"}, {metaName: "*v1.Service-test-splunk-stack1-license-master-service"}, @@ -43,16 +43,17 @@ func TestReconcileLicenseMaster(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *mockClient, cr interface{}) error { - return ReconcileLicenseMaster(c, cr.(*enterprisev1.LicenseMaster)) + _, err := ApplyLicenseMaster(c, cr.(*enterprisev1.LicenseMaster)) + return err } - reconcileTester(t, "TestReconcileLicenseMaster", ¤t, revised, createCalls, updateCalls, reconcile) + reconcileTester(t, "TestApplyLicenseMaster", ¤t, revised, createCalls, updateCalls, reconcile) // test deletion currentTime := metav1.NewTime(time.Now()) revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { - err := ReconcileLicenseMaster(c, cr.(*enterprisev1.LicenseMaster)) + _, err := ApplyLicenseMaster(c, cr.(*enterprisev1.LicenseMaster)) return true, err } splunkDeletionTester(t, revised, deleteFunc) diff --git a/pkg/splunk/reconcile/searchhead.go b/pkg/splunk/reconcile/searchhead.go deleted file mode 100644 index a15ce52cb..000000000 --- a/pkg/splunk/reconcile/searchhead.go +++ /dev/null @@ -1,77 +0,0 @@ 
-// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package deploy - -import ( - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" - "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// ReconcileSearchHead reconciles the state for a Splunk Enterprise search head cluster. -func ReconcileSearchHead(client ControllerClient, cr *enterprisev1.SearchHead) error { - - // validate and updates defaults for CR - err := enterprise.ValidateSearchHeadSpec(&cr.Spec) - if err != nil { - return err - } - - // check if deletion has been requested - if cr.ObjectMeta.DeletionTimestamp != nil { - _, err := CheckSplunkDeletion(cr, client) - return err - } - - // create or update general config resources - err = ReconcileSplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkSearchHead) - if err != nil { - return err - } - - // create or update a headless search head cluster service - err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkSearchHead, true)) - if err != nil { - return err - } - - // create or update a regular search head cluster service - err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkSearchHead, false)) - if err != nil { - return err - } - - // create or update a deployer service - err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, 
enterprise.SplunkDeployer, false)) - if err != nil { - return err - } - - // create or update statefulset for the deployer - statefulSet, err := enterprise.GetDeployerStatefulSet(cr) - if err != nil { - return err - } - err = ApplyStatefulSet(client, statefulSet) - if err != nil { - return err - } - - // create or update statefulset for the search heads - statefulSet, err = enterprise.GetSearchHeadStatefulSet(cr) - if err != nil { - return err - } - return ApplyStatefulSet(client, statefulSet) -} diff --git a/pkg/splunk/reconcile/searchhead_test.go b/pkg/splunk/reconcile/searchhead_test.go deleted file mode 100644 index f3582516f..000000000 --- a/pkg/splunk/reconcile/searchhead_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package deploy - -import ( - "testing" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" -) - -func TestReconcileSearchHead(t *testing.T) { - funcCalls := []mockFuncCall{ - {metaName: "*v1.Secret-test-splunk-stack1-search-head-secrets"}, - {metaName: "*v1.Service-test-splunk-stack1-search-head-headless"}, - {metaName: "*v1.Service-test-splunk-stack1-search-head-service"}, - {metaName: "*v1.Service-test-splunk-stack1-deployer-service"}, - {metaName: "*v1.StatefulSet-test-splunk-stack1-deployer"}, - {metaName: "*v1.StatefulSet-test-splunk-stack1-search-head"}, - } - createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} - updateCalls := map[string][]mockFuncCall{"Get": funcCalls, "Update": []mockFuncCall{funcCalls[4], funcCalls[5]}} - current := enterprisev1.SearchHead{ - TypeMeta: metav1.TypeMeta{ - Kind: "SearchHead", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "stack1", - Namespace: "test", - }, - } - revised := current.DeepCopy() - revised.Spec.Image = "splunk/test" - reconcile := func(c *mockClient, cr interface{}) error { - return ReconcileSearchHead(c, cr.(*enterprisev1.SearchHead)) - } - reconcileTester(t, "TestReconcileSearchHead", ¤t, revised, createCalls, updateCalls, reconcile) - - // test deletion - currentTime := metav1.NewTime(time.Now()) - revised.ObjectMeta.DeletionTimestamp = ¤tTime - revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} - deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { - err := ReconcileSearchHead(c, cr.(*enterprisev1.SearchHead)) - return true, err - } - splunkDeletionTester(t, revised, deleteFunc) -} diff --git a/pkg/splunk/reconcile/searchheadcluster.go b/pkg/splunk/reconcile/searchheadcluster.go new file mode 100644 index 000000000..a8386ce72 --- /dev/null +++ b/pkg/splunk/reconcile/searchheadcluster.go @@ -0,0 +1,267 @@ +// Copyright (c) 2018-2020 
Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reconcile + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + "github.com/splunk/splunk-operator/pkg/splunk/resources" +) + +// ApplySearchHeadCluster reconciles the state for a Splunk Enterprise search head cluster. 
+func ApplySearchHeadCluster(client ControllerClient, cr *enterprisev1.SearchHeadCluster) (reconcile.Result, error) { + // unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + scopedLog := log.WithName("ApplySearchHeadCluster").WithValues("name", cr.GetIdentifier(), "namespace", cr.GetNamespace()) + + // validate and updates defaults for CR + err := enterprise.ValidateSearchHeadClusterSpec(&cr.Spec) + if err != nil { + return result, err + } + + // updates status after function completes + cr.Status.Phase = enterprisev1.PhaseError + cr.Status.DeployerPhase = enterprisev1.PhaseError + cr.Status.Replicas = cr.Spec.Replicas + cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-search-head", cr.GetIdentifier()) + defer func() { + client.Status().Update(context.TODO(), cr) + }() + + // check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := CheckSplunkDeletion(cr, client) + if terminating && err != nil { // don't bother if no error, since it will just be removed immediately after + cr.Status.Phase = enterprisev1.PhaseTerminating + cr.Status.DeployerPhase = enterprisev1.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + // create or update general config resources + secrets, err := ApplySplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkSearchHead) + if err != nil { + return result, err + } + + // create or update a headless search head cluster service + err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkSearchHead, true)) + if err != nil { + return result, err + } + + // create or update a regular search head cluster service + err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkSearchHead, false)) + if err != nil { + return result, err + } + + // create or 
update a deployer service + err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkDeployer, false)) + if err != nil { + return result, err + } + + // create or update statefulset for the deployer + statefulSet, err := enterprise.GetDeployerStatefulSet(cr) + if err != nil { + return result, err + } + deployerManager := DefaultStatefulSetPodManager{} + phase, err := deployerManager.Update(client, statefulSet, 1) + if err != nil { + return result, err + } + cr.Status.DeployerPhase = phase + + // create or update statefulset for the search heads + statefulSet, err = enterprise.GetSearchHeadStatefulSet(cr) + if err != nil { + return result, err + } + mgr := SearchHeadClusterPodManager{log: scopedLog, cr: cr, secrets: secrets, newSplunkClient: splclient.NewSplunkClient} + phase, err = mgr.Update(client, statefulSet, cr.Spec.Replicas) + if err != nil { + return result, err + } + cr.Status.Phase = phase + + // no need to requeue if everything is ready + if cr.Status.Phase == enterprisev1.PhaseReady { + result.Requeue = false + } + return result, nil +} + +// SearchHeadClusterPodManager is used to manage the pods within a search head cluster +type SearchHeadClusterPodManager struct { + log logr.Logger + cr *enterprisev1.SearchHeadCluster + secrets *corev1.Secret + newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient +} + +// Update for SearchHeadClusterPodManager handles all updates for a statefulset of search heads +func (mgr *SearchHeadClusterPodManager) Update(c ControllerClient, statefulSet *appsv1.StatefulSet, desiredReplicas int32) (enterprisev1.ResourcePhase, error) { + // update statefulset, if necessary + _, err := ApplyStatefulSet(c, statefulSet) + if err != nil { + return enterprisev1.PhaseError, err + } + + // update CR status with SHC information + err = mgr.updateStatus(statefulSet) + if err != nil || mgr.cr.Status.ReadyReplicas == 0 || !mgr.cr.Status.Initialized || 
!mgr.cr.Status.CaptainReady { + mgr.log.Error(err, "Search head cluster is not ready") + return enterprisev1.PhasePending, nil + } + + // manage scaling and updates + return UpdateStatefulSetPods(c, statefulSet, mgr, desiredReplicas) +} + +// PrepareScaleDown for SearchHeadClusterPodManager prepares search head pod to be removed via scale down event; it returns true when ready +func (mgr *SearchHeadClusterPodManager) PrepareScaleDown(n int32) (bool, error) { + // start by quarantining the pod + result, err := mgr.PrepareRecycle(n) + if err != nil || !result { + return result, err + } + + // pod is quarantined; decommission it + memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), n) + mgr.log.Info("Removing member from search head cluster", "memberName", memberName) + c := mgr.getClient(n) + err = c.RemoveSearchHeadClusterMember() + if err != nil { + return false, err + } + + // all done -> ok to scale down the statefulset + return true, nil +} + +// PrepareRecycle for SearchHeadClusterPodManager prepares search head pod to be recycled for updates; it returns true when ready +func (mgr *SearchHeadClusterPodManager) PrepareRecycle(n int32) (bool, error) { + memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), n) + + switch mgr.cr.Status.Members[n].Status { + case "Up": + // Detain search head + mgr.log.Info("Detaining search head cluster member", "memberName", memberName) + c := mgr.getClient(n) + return false, c.SetSearchHeadDetention(true) + + case "ManualDetention": + // Wait until active searches have drained + searchesComplete := mgr.cr.Status.Members[n].ActiveHistoricalSearchCount+mgr.cr.Status.Members[n].ActiveRealtimeSearchCount == 0 + if searchesComplete { + mgr.log.Info("Detention complete", "memberName", memberName) + } else { + mgr.log.Info("Waiting for active searches to complete", "memberName", memberName) + } + return searchesComplete, nil + } + + 
// unhandled status + return false, fmt.Errorf("Status=%s", mgr.cr.Status.Members[n].Status) +} + +// FinishRecycle for SearchHeadClusterPodManager completes recycle event for search head pod; it returns true when complete +func (mgr *SearchHeadClusterPodManager) FinishRecycle(n int32) (bool, error) { + memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), n) + + switch mgr.cr.Status.Members[n].Status { + case "Up": + // not in detention + return true, nil + + case "ManualDetention": + // release from detention + mgr.log.Info("Releasing search head cluster member from detention", "memberName", memberName) + c := mgr.getClient(n) + return false, c.SetSearchHeadDetention(false) + } + + // unhandled status + return false, fmt.Errorf("Status=%s", mgr.cr.Status.Members[n].Status) +} + +// getClient for SearchHeadClusterPodManager returns a SplunkClient for the member n +func (mgr *SearchHeadClusterPodManager) getClient(n int32) *splclient.SplunkClient { + memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), n) + fqdnName := resources.GetServiceFQDN(mgr.cr.GetNamespace(), + fmt.Sprintf("%s.%s", memberName, enterprise.GetSplunkServiceName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), true))) + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(mgr.secrets.Data["password"])) +} + +// updateStatus for SearchHeadClusterPodManager uses the REST API to update the status for a SearchHead custom resource +func (mgr *SearchHeadClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSet) error { + // populate members status using REST API to get search head cluster member info + mgr.cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas + if mgr.cr.Status.ReadyReplicas == 0 { + return nil + } + gotCaptainInfo := false + mgr.cr.Status.Members = []enterprisev1.SearchHeadClusterMemberStatus{} + for n := int32(0); n < 
mgr.cr.Status.ReadyReplicas; n++ { + c := mgr.getClient(n) + memberName := enterprise.GetSplunkStatefulsetPodName(enterprise.SplunkSearchHead, mgr.cr.GetIdentifier(), n) + memberStatus := enterprisev1.SearchHeadClusterMemberStatus{Name: memberName} + memberInfo, err := c.GetSearchHeadClusterMemberInfo() + if err == nil { + memberStatus.Status = memberInfo.Status + memberStatus.Adhoc = memberInfo.Adhoc + memberStatus.Registered = memberInfo.Registered + memberStatus.ActiveHistoricalSearchCount = memberInfo.ActiveHistoricalSearchCount + memberStatus.ActiveRealtimeSearchCount = memberInfo.ActiveRealtimeSearchCount + if !gotCaptainInfo { + // try querying captain api; note that this should work on any node + captainInfo, err := c.GetSearchHeadCaptainInfo() + if err == nil { + mgr.cr.Status.Captain = captainInfo.Label + mgr.cr.Status.CaptainReady = captainInfo.ServiceReady + mgr.cr.Status.Initialized = captainInfo.Initialized + mgr.cr.Status.MinPeersJoined = captainInfo.MinPeersJoined + mgr.cr.Status.MaintenanceMode = captainInfo.MaintenanceMode + gotCaptainInfo = true + } + } + } else if n < statefulSet.Status.Replicas { + // ignore error if pod was just terminated for scale down event (n >= Replicas) + mgr.log.Error(err, "Unable to retrieve search head cluster member info", "memberName", memberName) + return err + } + mgr.cr.Status.Members = append(mgr.cr.Status.Members, memberStatus) + } + + return nil +} diff --git a/pkg/splunk/reconcile/searchheadcluster_test.go b/pkg/splunk/reconcile/searchheadcluster_test.go new file mode 100644 index 000000000..ffe69eef2 --- /dev/null +++ b/pkg/splunk/reconcile/searchheadcluster_test.go @@ -0,0 +1,215 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package reconcile + +import ( + "strings" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + spltest "github.com/splunk/splunk-operator/pkg/splunk/test" +) + +func TestApplySearchHeadCluster(t *testing.T) { + funcCalls := []mockFuncCall{ + {metaName: "*v1.Secret-test-splunk-stack1-search-head-secrets"}, + {metaName: "*v1.Service-test-splunk-stack1-search-head-headless"}, + {metaName: "*v1.Service-test-splunk-stack1-search-head-service"}, + {metaName: "*v1.Service-test-splunk-stack1-deployer-service"}, + {metaName: "*v1.StatefulSet-test-splunk-stack1-deployer"}, + {metaName: "*v1.StatefulSet-test-splunk-stack1-search-head"}, + } + createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} + updateCalls := map[string][]mockFuncCall{"Get": funcCalls, "Update": []mockFuncCall{funcCalls[4], funcCalls[5]}} + statefulSet := enterprisev1.SearchHeadCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "SearchHeadCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + } + revised := statefulSet.DeepCopy() + revised.Spec.Image = "splunk/test" + reconcile := func(c *mockClient, cr interface{}) error { + _, err := ApplySearchHeadCluster(c, cr.(*enterprisev1.SearchHeadCluster)) + return err + } + reconcileTester(t, "TestApplySearchHeadCluster", 
&statefulSet, revised, createCalls, updateCalls, reconcile) + + // test deletion + currentTime := metav1.NewTime(time.Now()) + revised.ObjectMeta.DeletionTimestamp = ¤tTime + revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} + deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { + _, err := ApplySearchHeadCluster(c, cr.(*enterprisev1.SearchHeadCluster)) + return true, err + } + splunkDeletionTester(t, revised, deleteFunc) +} + +func searchHeadClusterPodManagerTester(t *testing.T, method string, mockHandlers []spltest.MockHTTPHandler, + desiredReplicas int32, wantPhase enterprisev1.ResourcePhase, statefulSet *appsv1.StatefulSet, + wantCalls map[string][]mockFuncCall, wantError error, initObjects ...runtime.Object) { + + // test for updating + scopedLog := log.WithName(method) + cr := enterprisev1.SearchHeadCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "SearchHeadCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + } + secrets := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1-secrets", + Namespace: "test", + }, + Data: map[string][]byte{ + "password": []byte{'1', '2', '3'}, + }, + } + mockSplunkClient := &spltest.MockHTTPClient{} + mockSplunkClient.AddHandlers(mockHandlers...) + mgr := &SearchHeadClusterPodManager{ + log: scopedLog, + cr: &cr, + secrets: secrets, + newSplunkClient: func(managementURI, username, password string) *splclient.SplunkClient { + c := splclient.NewSplunkClient(managementURI, username, password) + c.Client = mockSplunkClient + return c + }, + } + podManagerUpdateTester(t, method, mgr, desiredReplicas, wantPhase, statefulSet, wantCalls, wantError, initObjects...) 
+ mockSplunkClient.CheckRequests(t, method) +} + +func TestSearchHeadClusterPodManager(t *testing.T) { + var replicas int32 = 1 + statefulSet := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + UpdatedReplicas: replicas, + UpdateRevision: "v1", + }, + } + mockHandlers := []spltest.MockHTTPHandler{ + {"GET", "https://splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/member/info?count=0&output_mode=json", 500, nil, ``}, + } + funcCalls := []mockFuncCall{ + {metaName: "*v1.StatefulSet-test-splunk-stack1"}, + {metaName: "*v1.Pod-test-splunk-stack1-0"}, + } + wantCalls := map[string][]mockFuncCall{"Get": {funcCalls[0]}} + + // test API failure + method := "SearchHeadClusterPodManager.Update(API failure)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhasePending, statefulSet, wantCalls, nil, statefulSet) + + // test 1 ready pod + mockHandlers = []spltest.MockHTTPHandler{ + {"GET", "https://splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/member/info?count=0&output_mode=json", 200, nil, + 
`{"links":{},"origin":"https://localhost:8089/services/shcluster/member/info","updated":"2020-03-15T16:30:38+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"member","id":"https://localhost:8089/services/shcluster/member/info/member","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/member/info/member","list":"/services/shcluster/member/info/member"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_historical_search_count":0,"active_realtime_search_count":0,"adhoc_searchhead":false,"eai:acl":null,"is_registered":true,"last_heartbeat_attempt":1584289836,"maintenance_mode":false,"no_artifact_replications":false,"peer_load_stats_gla_15m":0,"peer_load_stats_gla_1m":0,"peer_load_stats_gla_5m":0,"peer_load_stats_max_runtime":0,"peer_load_stats_num_autosummary":0,"peer_load_stats_num_historical":0,"peer_load_stats_num_realtime":0,"peer_load_stats_num_running":0,"peer_load_stats_total_runtime":0,"restart_state":"NoRestart","status":"Up"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`}, + {"GET", "https://splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/captain/info?count=0&output_mode=json", 200, nil, + 
`{"links":{},"origin":"https://localhost:8089/services/shcluster/captain/info","updated":"2020-03-15T16:36:42+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"captain","id":"https://localhost:8089/services/shcluster/captain/info/captain","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/captain/info/captain","list":"/services/shcluster/captain/info/captain"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"eai:acl":null,"elected_captain":1584139352,"id":"A9D5FCCF-EB93-4E0A-93E1-45B56483EA7A","initialized_flag":true,"label":"splunk-s2-search-head-0","maintenance_mode":false,"mgmt_uri":"https://splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","min_peers_joined_flag":true,"peer_scheme_host_port":"https://splunk-s2-search-head-0.splunk-s2-search-head-headless.splunk.svc.cluster.local:8089","rolling_restart_flag":false,"service_ready_flag":true,"start_time":1584139291}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`}, + } + wantCalls = map[string][]mockFuncCall{"Get": funcCalls} + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1-0", + Namespace: "test", + Labels: map[string]string{ + "controller-revision-hash": "v1", + }, + }, + } + method = "SearchHeadClusterPodManager.Update(All pods ready)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseReady, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => transition to detention + mockHandlers = append(mockHandlers, + spltest.MockHTTPHandler{"POST", "https://splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/member/control/control/set_manual_detention?manual_detention=on", 200, nil, 
``}, + ) + pod.ObjectMeta.Labels["controller-revision-hash"] = "v0" + method = "SearchHeadClusterPodManager.Update(Quarantine Pod)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => wait for searches to drain + mockHandlers = []spltest.MockHTTPHandler{mockHandlers[0], mockHandlers[1]} + mockHandlers[0].Body = strings.Replace(mockHandlers[0].Body, `"status":"Up"`, `"status":"ManualDetention"`, 1) + mockHandlers[0].Body = strings.Replace(mockHandlers[0].Body, `"active_historical_search_count":0`, `"active_historical_search_count":1`, 1) + method = "SearchHeadClusterPodManager.Update(Draining Searches)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod needs update => delete pod + wantCalls = map[string][]mockFuncCall{"Get": funcCalls, "Delete": {funcCalls[1]}} + mockHandlers[0].Body = strings.Replace(mockHandlers[0].Body, `"active_historical_search_count":1`, `"active_historical_search_count":0`, 1) + method = "SearchHeadClusterPodManager.Update(Delete Pod)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + // test pod update finished => release from detention + wantCalls = map[string][]mockFuncCall{"Get": funcCalls} + pod.ObjectMeta.Labels["controller-revision-hash"] = "v1" + mockHandlers = append(mockHandlers, + spltest.MockHTTPHandler{"POST", "https://splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/member/control/control/set_manual_detention?manual_detention=off", 200, nil, ``}, + ) + method = "SearchHeadClusterPodManager.Update(Release Quarantine)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseUpdating, statefulSet, wantCalls, nil, statefulSet, pod) + + 
// test scale down => remove member + mockHandlers[2] = spltest.MockHTTPHandler{"GET", "https://splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/member/info?count=0&output_mode=json", 200, nil, + `{"links":{},"origin":"https://localhost:8089/services/shcluster/member/info","updated":"2020-03-15T16:30:38+00:00","generator":{"build":"a7f645ddaf91","version":"8.0.2"},"entry":[{"name":"member","id":"https://localhost:8089/services/shcluster/member/info/member","updated":"1970-01-01T00:00:00+00:00","links":{"alternate":"/services/shcluster/member/info/member","list":"/services/shcluster/member/info/member"},"author":"system","acl":{"app":"","can_list":true,"can_write":true,"modifiable":false,"owner":"system","perms":{"read":["admin","splunk-system-role"],"write":["admin","splunk-system-role"]},"removable":false,"sharing":"system"},"content":{"active_historical_search_count":0,"active_realtime_search_count":0,"adhoc_searchhead":false,"eai:acl":null,"is_registered":true,"last_heartbeat_attempt":1584289836,"maintenance_mode":false,"no_artifact_replications":false,"peer_load_stats_gla_15m":0,"peer_load_stats_gla_1m":0,"peer_load_stats_gla_5m":0,"peer_load_stats_max_runtime":0,"peer_load_stats_num_autosummary":0,"peer_load_stats_num_historical":0,"peer_load_stats_num_realtime":0,"peer_load_stats_num_running":0,"peer_load_stats_total_runtime":0,"restart_state":"NoRestart","status":"ManualDetention"}}],"paging":{"total":1,"perPage":30,"offset":0},"messages":[]}`} + mockHandlers = append(mockHandlers, + spltest.MockHTTPHandler{"POST", "https://splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local:8089/services/shcluster/member/consensus/default/remove_server?output_mode=json", 200, nil, ``}, + ) + pvcCalls := []mockFuncCall{ + {metaName: "*v1.PersistentVolumeClaim-test-pvc-etc-splunk-stack1-1"}, + {metaName: "*v1.PersistentVolumeClaim-test-pvc-var-splunk-stack1-1"}, + } + funcCalls[1] = 
mockFuncCall{metaName: "*v1.Pod-test-splunk-stack1-0"} + wantCalls = map[string][]mockFuncCall{"Get": {funcCalls[0]}, "Delete": pvcCalls, "Update": {funcCalls[0]}} + wantCalls["Get"] = append(wantCalls["Get"], pvcCalls...) + pvcList := []*corev1.PersistentVolumeClaim{ + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-etc-splunk-stack1-1", Namespace: "test"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-var-splunk-stack1-1", Namespace: "test"}}, + } + pod.ObjectMeta.Name = "splunk-stack1-2" + replicas = 2 + statefulSet.Status.Replicas = 2 + statefulSet.Status.ReadyReplicas = 2 + statefulSet.Status.UpdatedReplicas = 2 + method = "SearchHeadClusterPodManager.Update(Remove Member)" + searchHeadClusterPodManagerTester(t, method, mockHandlers, 1, enterprisev1.PhaseScalingDown, statefulSet, wantCalls, nil, statefulSet, pod, pvcList[0], pvcList[1]) +} diff --git a/pkg/splunk/reconcile/service.go b/pkg/splunk/reconcile/service.go index 0bf52137c..ba42571a0 100644 --- a/pkg/splunk/reconcile/service.go +++ b/pkg/splunk/reconcile/service.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "context" diff --git a/pkg/splunk/reconcile/service_test.go b/pkg/splunk/reconcile/service_test.go index ec0d4cb42..f2e5898cd 100644 --- a/pkg/splunk/reconcile/service_test.go +++ b/pkg/splunk/reconcile/service_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "testing" diff --git a/pkg/splunk/reconcile/spark.go b/pkg/splunk/reconcile/spark.go index e10c7ee4c..c8a31bf7f 100644 --- a/pkg/splunk/reconcile/spark.go +++ b/pkg/splunk/reconcile/spark.go @@ -12,59 +12,87 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy
+package reconcile
 
 import (
+	"context"
+	"fmt"
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
 	enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2"
 	"github.com/splunk/splunk-operator/pkg/splunk/spark"
 )
 
-// ReconcileSpark reconciles the Deployments and Services for a Spark cluster.
-func ReconcileSpark(client ControllerClient, cr *enterprisev1.Spark) error {
+// ApplySpark reconciles the Deployments and Services for a Spark cluster.
+func ApplySpark(client ControllerClient, cr *enterprisev1.Spark) (reconcile.Result, error) {
+
+	// unless modified, reconcile for this object will be requeued after 5 seconds
+	result := reconcile.Result{
+		Requeue:      true,
+		RequeueAfter: time.Second * 5,
+	}
 
 	// validate and updates defaults for CR
 	err := spark.ValidateSparkSpec(&cr.Spec)
 	if err != nil {
-		return err
+		return result, err
 	}
 
+	// updates status after function completes
+	cr.Status.Phase = enterprisev1.PhaseError
+	cr.Status.Replicas = cr.Spec.Replicas
+	cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-spark-worker", cr.GetIdentifier())
+	defer func() {
+		client.Status().Update(context.TODO(), cr)
+	}()
+
 	// check if deletion has been requested
 	if cr.ObjectMeta.DeletionTimestamp != nil {
-		_, err := CheckSplunkDeletion(cr, client)
-		return err
+		terminating, err := CheckSplunkDeletion(cr, client)
+		if terminating && err != nil { // don't bother if no error, since it will just be removed immediately after
+			cr.Status.Phase = enterprisev1.PhaseTerminating
+		} else {
+			result.Requeue = false
+		}
+		return result, err
 	}
 
 	// create or update a service for spark master
 	err = ApplyService(client, spark.GetSparkService(cr, spark.SparkMaster, false))
 	if err != nil {
-		return err
+		return result, err
 	}
 
 	// create or update a headless service for spark workers
 	err = ApplyService(client, spark.GetSparkService(cr, spark.SparkWorker, true))
 	if err != nil {
-		return err
+		return result, err
 	}
 
 	// create or update 
deployment for spark master deployment, err := spark.GetSparkDeployment(cr, spark.SparkMaster) if err != nil { - return err + return result, err } - err = ApplyDeployment(client, deployment) + cr.Status.MasterPhase, err = ApplyDeployment(client, deployment) if err != nil { - return err + cr.Status.MasterPhase = enterprisev1.PhaseError + return result, err } // create or update deployment for spark worker deployment, err = spark.GetSparkDeployment(cr, spark.SparkWorker) if err != nil { - return err + return result, err } - err = ApplyDeployment(client, deployment) + cr.Status.Phase, err = ApplyDeployment(client, deployment) + cr.Status.ReadyReplicas = deployment.Status.ReadyReplicas if err != nil { - return err + cr.Status.Phase = enterprisev1.PhaseError + } else if cr.Status.Phase == enterprisev1.PhaseReady { + result.Requeue = false } - - return nil + return result, err } diff --git a/pkg/splunk/reconcile/spark_test.go b/pkg/splunk/reconcile/spark_test.go index 17e1eee7d..1f6f72dd0 100644 --- a/pkg/splunk/reconcile/spark_test.go +++ b/pkg/splunk/reconcile/spark_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "testing" @@ -23,7 +23,7 @@ import ( enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" ) -func TestReconcileSpark(t *testing.T) { +func TestApplySpark(t *testing.T) { funcCalls := []mockFuncCall{ {metaName: "*v1.Service-test-splunk-stack1-spark-master-service"}, {metaName: "*v1.Service-test-splunk-stack1-spark-worker-headless"}, @@ -44,16 +44,17 @@ func TestReconcileSpark(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *mockClient, cr interface{}) error { - return ReconcileSpark(c, cr.(*enterprisev1.Spark)) + _, err := ApplySpark(c, cr.(*enterprisev1.Spark)) + return err } - reconcileTester(t, "TestReconcileSpark", ¤t, revised, createCalls, updateCalls, reconcile) + reconcileTester(t, "TestApplySpark", ¤t, revised, createCalls, updateCalls, reconcile) // test deletion currentTime := metav1.NewTime(time.Now()) revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { - err := ReconcileSpark(c, cr.(*enterprisev1.Spark)) + _, err := ApplySpark(c, cr.(*enterprisev1.Spark)) return true, err } splunkDeletionTester(t, revised, deleteFunc) diff --git a/pkg/splunk/reconcile/standalone.go b/pkg/splunk/reconcile/standalone.go index 3e25460c6..cbe7ca118 100644 --- a/pkg/splunk/reconcile/standalone.go +++ b/pkg/splunk/reconcile/standalone.go @@ -12,44 +12,81 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy
+package reconcile
 
 import (
+	"context"
+	"fmt"
+	"time"
+
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
 	enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2"
 	"github.com/splunk/splunk-operator/pkg/splunk/enterprise"
 )
 
-// ReconcileStandalone reconciles the StatefulSet for N standalone instances of Splunk Enterprise.
-func ReconcileStandalone(client ControllerClient, cr *enterprisev1.Standalone) error {
+// ApplyStandalone reconciles the StatefulSet for N standalone instances of Splunk Enterprise.
+func ApplyStandalone(client ControllerClient, cr *enterprisev1.Standalone) (reconcile.Result, error) {
+
+	// unless modified, reconcile for this object will be requeued after 5 seconds
+	result := reconcile.Result{
+		Requeue:      true,
+		RequeueAfter: time.Second * 5,
+	}
 
 	// validate and updates defaults for CR
 	err := enterprise.ValidateStandaloneSpec(&cr.Spec)
 	if err != nil {
-		return err
+		return result, err
 	}
 
+	// updates status after function completes
+	cr.Status.Phase = enterprisev1.PhaseError
+	cr.Status.Replicas = cr.Spec.Replicas
+	cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-standalone", cr.GetIdentifier())
+	defer func() {
+		client.Status().Update(context.TODO(), cr)
+	}()
+
 	// check if deletion has been requested
 	if cr.ObjectMeta.DeletionTimestamp != nil {
-		_, err := CheckSplunkDeletion(cr, client)
-		return err
+		terminating, err := CheckSplunkDeletion(cr, client)
+		if terminating && err != nil { // don't bother if no error, since it will just be removed immediately after
+			cr.Status.Phase = enterprisev1.PhaseTerminating
+		} else {
+			result.Requeue = false
+		}
+		return result, err
 	}
 
 	// create or update general config resources
-	err = ReconcileSplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkStandalone)
+	_, err = ApplySplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, enterprise.SplunkStandalone)
 	if err != nil {
-		return err
+		return result, err
 	}
 
 	// create or update a 
headless service (this is required by DFS for Spark->standalone comms, possibly other things) err = ApplyService(client, enterprise.GetSplunkService(cr, cr.Spec.CommonSpec, enterprise.SplunkStandalone, true)) if err != nil { - return err + return result, err } // create or update statefulset statefulSet, err := enterprise.GetStandaloneStatefulSet(cr) if err != nil { - return err + return result, err + } + mgr := DefaultStatefulSetPodManager{} + phase, err := mgr.Update(client, statefulSet, cr.Spec.Replicas) + cr.Status.ReadyReplicas = statefulSet.Status.ReadyReplicas + if err != nil { + return result, err + } + cr.Status.Phase = phase + + // no need to requeue if everything is ready + if cr.Status.Phase == enterprisev1.PhaseReady { + result.Requeue = false } - return ApplyStatefulSet(client, statefulSet) + return result, nil } diff --git a/pkg/splunk/reconcile/standalone_test.go b/pkg/splunk/reconcile/standalone_test.go index ce2dd6e7d..a4627d590 100644 --- a/pkg/splunk/reconcile/standalone_test.go +++ b/pkg/splunk/reconcile/standalone_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "testing" @@ -23,7 +23,7 @@ import ( enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" ) -func TestReconcileStandalone(t *testing.T) { +func TestApplyStandalone(t *testing.T) { funcCalls := []mockFuncCall{ {metaName: "*v1.Secret-test-splunk-stack1-standalone-secrets"}, {metaName: "*v1.Service-test-splunk-stack1-standalone-headless"}, @@ -43,16 +43,17 @@ func TestReconcileStandalone(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *mockClient, cr interface{}) error { - return ReconcileStandalone(c, cr.(*enterprisev1.Standalone)) + _, err := ApplyStandalone(c, cr.(*enterprisev1.Standalone)) + return err } - reconcileTester(t, "TestReconcileStandalone", ¤t, revised, createCalls, updateCalls, reconcile) + reconcileTester(t, "TestApplyStandalone", ¤t, revised, createCalls, updateCalls, reconcile) // test deletion currentTime := metav1.NewTime(time.Now()) revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr enterprisev1.MetaObject, c ControllerClient) (bool, error) { - err := ReconcileStandalone(c, cr.(*enterprisev1.Standalone)) + _, err := ApplyStandalone(c, cr.(*enterprisev1.Standalone)) return true, err } splunkDeletionTester(t, revised, deleteFunc) diff --git a/pkg/splunk/reconcile/statefulset.go b/pkg/splunk/reconcile/statefulset.go index b03a3ed1c..ebec6026a 100644 --- a/pkg/splunk/reconcile/statefulset.go +++ b/pkg/splunk/reconcile/statefulset.go @@ -12,63 +12,226 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "context" + "fmt" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" ) +// StatefulSetPodManager is used to manage the pods within a StatefulSet +type StatefulSetPodManager interface { + // Update handles all updates for a statefulset and all of its pods + Update(ControllerClient, *appsv1.StatefulSet, int32) (enterprisev1.ResourcePhase, error) + + // PrepareScaleDown prepares pod to be removed via scale down event; it returns true when ready + PrepareScaleDown(int32) (bool, error) + + // PrepareRecycle prepares pod to be recycled for updates; it returns true when ready + PrepareRecycle(int32) (bool, error) + + // FinishRecycle completes recycle event for pod and returns true, or returns false if nothing to do + FinishRecycle(int32) (bool, error) +} + +// DefaultStatefulSetPodManager is a simple StatefulSetPodManager that does nothing +type DefaultStatefulSetPodManager struct{} + +// Update for DefaultStatefulSetPodManager handles all updates for a statefulset of standard pods +func (mgr *DefaultStatefulSetPodManager) Update(client ControllerClient, statefulSet *appsv1.StatefulSet, desiredReplicas int32) (enterprisev1.ResourcePhase, error) { + phase, err := ApplyStatefulSet(client, statefulSet) + if err == nil && phase == enterprisev1.PhaseReady { + phase, err = UpdateStatefulSetPods(client, statefulSet, mgr, desiredReplicas) + } + return phase, err +} + +// PrepareScaleDown for DefaultStatefulSetPodManager does nothing and returns true +func (mgr *DefaultStatefulSetPodManager) PrepareScaleDown(n int32) (bool, error) { + return true, nil +} + +// PrepareRecycle for DefaultStatefulSetPodManager does nothing and returns true +func (mgr *DefaultStatefulSetPodManager) PrepareRecycle(n int32) (bool, error) { + return true, nil +} + +// FinishRecycle for 
DefaultStatefulSetPodManager does nothing and returns true
+func (mgr *DefaultStatefulSetPodManager) FinishRecycle(n int32) (bool, error) {
+	return true, nil
+}
+
 // ApplyStatefulSet creates or updates a Kubernetes StatefulSet
-func ApplyStatefulSet(client ControllerClient, statefulSet *appsv1.StatefulSet) error {
-	scopedLog := log.WithName("ApplyStatefulSet").WithValues(
+func ApplyStatefulSet(c ControllerClient, revised *appsv1.StatefulSet) (enterprisev1.ResourcePhase, error) {
+	namespacedName := types.NamespacedName{Namespace: revised.GetNamespace(), Name: revised.GetName()}
+	var current appsv1.StatefulSet
+
+	err := c.Get(context.TODO(), namespacedName, &current)
+	if err != nil {
+		// no StatefulSet exists -> just create a new one
+		err = CreateResource(c, revised)
+		return enterprisev1.PhasePending, err
+	}
+
+	// found an existing StatefulSet
+
+	// check for changes in Pod template
+	hasUpdates := MergePodUpdates(&current.Spec.Template, &revised.Spec.Template, current.GetObjectMeta().GetName())
+	*revised = current // caller expects that object passed represents latest state
+
+	// only update if there are material differences, as determined by comparison function
+	if hasUpdates {
+		// this updates the desired state template, but doesn't actually modify any pods
+		// because we use an "OnUpdate" strategy https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+		// note also that this ignores Replicas, which is handled below by UpdateStatefulSetPods
+		return enterprisev1.PhaseUpdating, UpdateResource(c, revised)
+	}
+
+	// scaling and pod updates are handled by UpdateStatefulSetPods
+	return enterprisev1.PhaseReady, nil
+}
+
+// UpdateStatefulSetPods manages scaling and config updates for StatefulSets
+func UpdateStatefulSetPods(c ControllerClient, statefulSet *appsv1.StatefulSet, mgr StatefulSetPodManager, desiredReplicas int32) (enterprisev1.ResourcePhase, error) {
+
+	scopedLog := 
log.WithName("UpdateStatefulSetPods").WithValues( "name", statefulSet.GetObjectMeta().GetName(), "namespace", statefulSet.GetObjectMeta().GetNamespace()) - namespacedName := types.NamespacedName{Namespace: statefulSet.GetNamespace(), Name: statefulSet.GetName()} - var current appsv1.StatefulSet - - err := client.Get(context.TODO(), namespacedName, ¤t) - if err == nil { - // found existing StatefulSet - if MergeStatefulSetUpdates(¤t, statefulSet) { - // only update if there are material differences, as determined by comparison function - err = UpdateResource(client, ¤t) - } else { - scopedLog.Info("No changes for StatefulSet") + // wait for all replicas ready + replicas := *statefulSet.Spec.Replicas + readyReplicas := statefulSet.Status.ReadyReplicas + if readyReplicas < replicas { + scopedLog.Info("Waiting for pods to become ready") + if readyReplicas > 0 { + return enterprisev1.PhaseScalingUp, nil } - } else { - err = CreateResource(client, statefulSet) + return enterprisev1.PhasePending, nil + } else if readyReplicas > replicas { + scopedLog.Info("Waiting for scale down to complete") + return enterprisev1.PhaseScalingDown, nil } - return err -} + // readyReplicas == replicas -// MergeStatefulSetUpdates looks for material differences between a -// StatefulSet's current config and a revised config. It merges material -// changes from revised to current. This enables us to minimize updates. -// It returns true if there are material differences between them, or false otherwise. 
-func MergeStatefulSetUpdates(current *appsv1.StatefulSet, revised *appsv1.StatefulSet) bool { - scopedLog := log.WithName("MergeStatefulSetUpdates").WithValues( - "name", current.GetObjectMeta().GetName(), - "namespace", current.GetObjectMeta().GetNamespace()) - result := false - - // check for change in Replicas count - if current.Spec.Replicas != nil && revised.Spec.Replicas != nil && *current.Spec.Replicas != *revised.Spec.Replicas { - scopedLog.Info("StatefulSet Replicas differ", - "current", *current.Spec.Replicas, - "revised", *revised.Spec.Replicas) - current.Spec.Replicas = revised.Spec.Replicas - result = true + // check for scaling up + if readyReplicas < desiredReplicas { + // scale up StatefulSet to match desiredReplicas + scopedLog.Info("Scaling replicas up", "replicas", desiredReplicas) + *statefulSet.Spec.Replicas = desiredReplicas + return enterprisev1.PhaseScalingUp, UpdateResource(c, statefulSet) } - // check for changes in Pod template - if MergePodUpdates(¤t.Spec.Template, &revised.Spec.Template, current.GetObjectMeta().GetName()) { - result = true + // check for scaling down + if readyReplicas > desiredReplicas { + // prepare pod for removal via scale down + n := readyReplicas - 1 + podName := fmt.Sprintf("%s-%d", statefulSet.GetName(), n) + ready, err := mgr.PrepareScaleDown(n) + if err != nil { + scopedLog.Error(err, "Unable to decommission Pod", "podName", podName) + return enterprisev1.PhaseError, err + } + if !ready { + // wait until pod quarantine has completed before deleting it + return enterprisev1.PhaseScalingDown, nil + } + + // scale down statefulset to terminate pod + scopedLog.Info("Scaling replicas down", "replicas", n) + *statefulSet.Spec.Replicas = n + err = UpdateResource(c, statefulSet) + if err != nil { + scopedLog.Error(err, "Scale down update failed for StatefulSet") + return enterprisev1.PhaseError, err + } + + // delete PVCs used by the pod so that a future scale up will have clean state + for _, vol := range 
[]string{"pvc-etc", "pvc-var"} { + namespacedName := types.NamespacedName{ + Namespace: statefulSet.GetNamespace(), + Name: fmt.Sprintf("%s-%s", vol, podName), + } + var pvc corev1.PersistentVolumeClaim + err := c.Get(context.TODO(), namespacedName, &pvc) + if err != nil { + scopedLog.Error(err, "Unable to find PVC for deletion", "pvcName", pvc.ObjectMeta.Name) + return enterprisev1.PhaseError, err + } + log.Info("Deleting PVC", "pvcName", pvc.ObjectMeta.Name) + err = c.Delete(context.Background(), &pvc) + if err != nil { + scopedLog.Error(err, "Unable to delete PVC", "pvcName", pvc.ObjectMeta.Name) + return enterprisev1.PhaseError, err + } + } + + return enterprisev1.PhaseScalingDown, nil + } + + // ready and no StatefulSet scaling is required + // readyReplicas == desiredReplicas + + // check existing pods for desired updates + for n := readyReplicas - 1; n >= 0; n-- { + // get Pod + podName := fmt.Sprintf("%s-%d", statefulSet.GetName(), n) + namespacedName := types.NamespacedName{Namespace: statefulSet.GetNamespace(), Name: podName} + var pod corev1.Pod + err := c.Get(context.TODO(), namespacedName, &pod) + if err != nil { + scopedLog.Error(err, "Unable to find Pod", "podName", podName) + return enterprisev1.PhaseError, err + } + + // terminate pod if it has pending updates; k8s will start a new one with revised template + if statefulSet.Status.UpdateRevision != "" && statefulSet.Status.UpdateRevision != pod.GetLabels()["controller-revision-hash"] { + // pod needs to be updated; first, prepare it to be recycled + ready, err := mgr.PrepareRecycle(n) + if err != nil { + scopedLog.Error(err, "Unable to prepare Pod for recycling", "podName", podName) + return enterprisev1.PhaseError, err + } + if !ready { + // wait until pod quarantine has completed before deleting it + return enterprisev1.PhaseUpdating, nil + } + + // deleting pod will cause StatefulSet controller to create a new one with latest template + scopedLog.Info("Recycling Pod for updates", "podName", 
podName, + "statefulSetRevision", statefulSet.Status.UpdateRevision, + "podRevision", pod.GetLabels()["controller-revision-hash"]) + preconditions := client.Preconditions{UID: &pod.ObjectMeta.UID, ResourceVersion: &pod.ObjectMeta.ResourceVersion} + err = c.Delete(context.Background(), &pod, preconditions) + if err != nil { + scopedLog.Error(err, "Unable to delete Pod", "podName", podName) + return enterprisev1.PhaseError, err + } + + // only delete one at a time + return enterprisev1.PhaseUpdating, nil + } + + // check if pod was previously prepared for recycling; if so, complete + complete, err := mgr.FinishRecycle(n) + if err != nil { + scopedLog.Error(err, "Unable to complete recycling of pod", "podName", podName) + return enterprisev1.PhaseError, err + } + if !complete { + // return and wait until next reconcile to let things settle down + return enterprisev1.PhaseUpdating, nil + } } - return result + // all is good! + scopedLog.Info("All pods are ready") + return enterprisev1.PhaseReady, nil } diff --git a/pkg/splunk/reconcile/statefulset_test.go b/pkg/splunk/reconcile/statefulset_test.go index 1cecbf1d6..e6d9a6889 100644 --- a/pkg/splunk/reconcile/statefulset_test.go +++ b/pkg/splunk/reconcile/statefulset_test.go @@ -12,13 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( + "errors" + "fmt" "testing" + enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1alpha2" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) func TestApplyStatefulSet(t *testing.T) { @@ -26,7 +31,7 @@ func TestApplyStatefulSet(t *testing.T) { createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} updateCalls := map[string][]mockFuncCall{"Get": funcCalls, "Update": funcCalls} var replicas int32 = 1 - current := appsv1.StatefulSet{ + current := &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ Name: "splunk-stack1-indexer", Namespace: "test", @@ -36,9 +41,148 @@ func TestApplyStatefulSet(t *testing.T) { }, } revised := current.DeepCopy() - *revised.Spec.Replicas = 3 + revised.Spec.Template.ObjectMeta.Labels = map[string]string{"one": "two"} reconcile := func(c *mockClient, cr interface{}) error { - return ApplyStatefulSet(c, cr.(*appsv1.StatefulSet)) + _, err := ApplyStatefulSet(c, cr.(*appsv1.StatefulSet)) + return err } - reconcileTester(t, "TestApplyStatefulSet", ¤t, revised, createCalls, updateCalls, reconcile) + reconcileTester(t, "TestApplyStatefulSet", current, revised, createCalls, updateCalls, reconcile) +} + +func podManagerUpdateTester(t *testing.T, method string, mgr StatefulSetPodManager, + desiredReplicas int32, wantPhase enterprisev1.ResourcePhase, statefulSet *appsv1.StatefulSet, + wantCalls map[string][]mockFuncCall, wantError error, initObjects ...runtime.Object) { + + // initialize client + c := newMockClient() + for _, obj := range initObjects { + c.state[getStateKey(obj)] = obj + } + + // test update + gotPhase, err := mgr.Update(c, statefulSet, desiredReplicas) + if (err == nil && wantError != nil) || + (err != nil && wantError == nil) || + (err != nil && wantError != nil && err.Error() != wantError.Error()) { + t.Errorf("%s returned error %v; want %v", method, err, 
wantError) + } + if gotPhase != wantPhase { + t.Errorf("%s returned phase=%s; want %s", method, gotPhase, wantPhase) + } + + // check calls + c.checkCalls(t, method, wantCalls) +} + +func podManagerTester(t *testing.T, method string, mgr StatefulSetPodManager) { + // test create + funcCalls := []mockFuncCall{{metaName: "*v1.StatefulSet-test-splunk-stack1"}} + createCalls := map[string][]mockFuncCall{"Get": funcCalls, "Create": funcCalls} + var replicas int32 = 1 + current := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + Status: appsv1.StatefulSetStatus{ + Replicas: replicas, + ReadyReplicas: replicas, + UpdatedReplicas: replicas, + UpdateRevision: "v1", + }, + } + podManagerUpdateTester(t, method, mgr, 1, enterprisev1.PhasePending, current, createCalls, nil) + + // test update + revised := current.DeepCopy() + revised.Spec.Template.ObjectMeta.Labels = map[string]string{"one": "two"} + updateCalls := map[string][]mockFuncCall{"Get": funcCalls, "Update": funcCalls} + methodPlus := fmt.Sprintf("%s(%s)", method, "Update StatefulSet") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhaseUpdating, revised, updateCalls, nil, current) + + // test scale up (zero ready so far; wait for ready) + revised = current.DeepCopy() + current.Status.ReadyReplicas = 0 + scaleUpCalls := map[string][]mockFuncCall{"Get": funcCalls} + methodPlus = fmt.Sprintf("%s(%s)", method, "ScalingUp, 0 ready") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhasePending, revised, scaleUpCalls, nil, current) + + // test scale up (1 ready scaling to 2; wait for ready) + replicas = 2 + current.Status.Replicas = 2 + current.Status.ReadyReplicas = 1 + methodPlus = fmt.Sprintf("%s(%s)", method, "ScalingUp, 1/2 ready") + podManagerUpdateTester(t, methodPlus, mgr, 2, enterprisev1.PhaseScalingUp, revised, scaleUpCalls, nil, current) + + // test scale up (1 ready 
scaling to 2) + replicas = 1 + current.Status.Replicas = 1 + current.Status.ReadyReplicas = 1 + methodPlus = fmt.Sprintf("%s(%s)", method, "ScalingUp, Update Replicas 1=>2") + podManagerUpdateTester(t, methodPlus, mgr, 2, enterprisev1.PhaseScalingUp, revised, updateCalls, nil, current) + + // test scale down (2 ready, 1 desired) + replicas = 1 + current.Status.Replicas = 1 + current.Status.ReadyReplicas = 2 + methodPlus = fmt.Sprintf("%s(%s)", method, "ScalingDown, Ready > Replicas") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhaseScalingDown, revised, scaleUpCalls, nil, current) + + // test scale down (2 ready scaling down to 1) + pvcCalls := []mockFuncCall{ + {metaName: "*v1.PersistentVolumeClaim-test-pvc-etc-splunk-stack1-1"}, + {metaName: "*v1.PersistentVolumeClaim-test-pvc-var-splunk-stack1-1"}, + } + scaleDownCalls := map[string][]mockFuncCall{ + "Get": {funcCalls[0], pvcCalls[0], pvcCalls[1]}, + "Update": {funcCalls[0]}, + "Delete": pvcCalls, + } + pvcList := []*corev1.PersistentVolumeClaim{ + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-etc-splunk-stack1-1", Namespace: "test"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "pvc-var-splunk-stack1-1", Namespace: "test"}}, + } + replicas = 2 + current.Status.Replicas = 2 + current.Status.ReadyReplicas = 2 + methodPlus = fmt.Sprintf("%s(%s)", method, "ScalingDown, Update Replicas 2=>1") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhaseScalingDown, revised, scaleDownCalls, nil, current, pvcList[0], pvcList[1]) + + // test pod not found + replicas = 1 + current.Status.Replicas = 1 + current.Status.ReadyReplicas = 1 + podCalls := []mockFuncCall{funcCalls[0], {metaName: "*v1.Pod-test-splunk-stack1-0"}} + getPodCalls := map[string][]mockFuncCall{"Get": podCalls} + methodPlus = fmt.Sprintf("%s(%s)", method, "Pod not found") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhaseError, revised, getPodCalls, errors.New("NotFound"), current) + + // test pod updated + pod := 
&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "splunk-stack1-0", + Namespace: "test", + Labels: map[string]string{ + "controller-revision-hash": "v0", + }, + }, + } + updatePodCalls := map[string][]mockFuncCall{"Get": podCalls, "Delete": {podCalls[1]}} + methodPlus = fmt.Sprintf("%s(%s)", method, "Recycle pod") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhaseUpdating, revised, updatePodCalls, nil, current, pod) + + // test all pods ready + pod.ObjectMeta.Labels["controller-revision-hash"] = "v1" + methodPlus = fmt.Sprintf("%s(%s)", method, "All pods ready") + podManagerUpdateTester(t, methodPlus, mgr, 1, enterprisev1.PhaseReady, revised, getPodCalls, nil, current, pod) +} + +func TestDefaultStatefulSetPodManager(t *testing.T) { + // test for updating + mgr := DefaultStatefulSetPodManager{} + method := "DefaultStatefulSetPodManager.Update" + podManagerTester(t, method, &mgr) } diff --git a/pkg/splunk/reconcile/util.go b/pkg/splunk/reconcile/util.go index bc6cf3098..3f6bde78e 100644 --- a/pkg/splunk/reconcile/util.go +++ b/pkg/splunk/reconcile/util.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package deploy +package reconcile import ( "context" @@ -23,13 +23,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + //stdlog "log" + //"github.com/go-logr/stdr" "github.com/splunk/splunk-operator/pkg/splunk/resources" ) -// logger used by splunk.deploy package -var log = logf.Log.WithName("splunk.deploy") +// kubernetes logger used by splunk.reconcile package +var log = logf.Log.WithName("splunk.reconcile") + +// simple stdout logger, used for debugging +//var log = stdr.New(stdlog.New(os.Stderr, "", stdlog.LstdFlags|stdlog.Lshortfile)).WithName("splunk.reconcile") // The ResourceObject type implements methods of runtime.Object and GetObjectMeta() type ResourceObject interface { @@ -47,6 +53,7 @@ func CreateResource(client ControllerClient, obj ResourceObject) error { scopedLog := log.WithName("CreateResource").WithValues( "name", obj.GetObjectMeta().GetName(), "namespace", obj.GetObjectMeta().GetNamespace()) + err := client.Create(context.TODO(), obj) if err != nil && !errors.IsAlreadyExists(err) { @@ -81,87 +88,106 @@ func UpdateResource(client ControllerClient, obj ResourceObject) error { // current. This enables us to minimize updates. It returns true if there // are material differences between them, or false otherwise. func MergePodUpdates(current *corev1.PodTemplateSpec, revised *corev1.PodTemplateSpec, name string) bool { - scopedLog := log.WithName("MergePodUpdates").WithValues("name", name) + result := MergePodSpecUpdates(¤t.Spec, &revised.Spec, name) + if MergePodMetaUpdates(¤t.ObjectMeta, &revised.ObjectMeta, name) { + result = true + } + return result +} + +// MergePodMetaUpdates looks for material differences between a Pod's current +// meta data and a revised meta data. It merges material changes from revised to +// current. This enables us to minimize updates. 
It returns true if there +// are material differences between them, or false otherwise. +func MergePodMetaUpdates(current *metav1.ObjectMeta, revised *metav1.ObjectMeta, name string) bool { + scopedLog := log.WithName("MergePodMetaUpdates").WithValues("name", name) result := false - // check for changes in Affinity - if resources.CompareByMarshall(current.Spec.Affinity, revised.Spec.Affinity) { - scopedLog.Info("Pod Affinity differs", - "current", current.Spec.Affinity, - "revised", revised.Spec.Affinity) - current.Spec.Affinity = revised.Spec.Affinity + // check Annotations + if !reflect.DeepEqual(current.Annotations, revised.Annotations) { + scopedLog.Info("Container Annotations differ", "current", current.Annotations, "revised", revised.Annotations) + current.Annotations = revised.Annotations result = true } - // check for changes in SchedulerName - if current.Spec.SchedulerName != revised.Spec.SchedulerName { - scopedLog.Info("Pod SchedulerName differs", - "current", current.Spec.SchedulerName, - "revised", revised.Spec.SchedulerName) - current.Spec.SchedulerName = revised.Spec.SchedulerName + // check Labels + if !reflect.DeepEqual(current.Labels, revised.Labels) { + scopedLog.Info("Container Labels differ", "current", current.Labels, "revised", revised.Labels) + current.Labels = revised.Labels result = true } - // check Annotations - if !reflect.DeepEqual(current.ObjectMeta.Annotations, revised.ObjectMeta.Annotations) { - scopedLog.Info("Container Annotations differ", - "current", current.ObjectMeta.Annotations, - "revised", revised.ObjectMeta.Annotations) - current.ObjectMeta.Annotations = revised.ObjectMeta.Annotations + return result +} + +// MergePodSpecUpdates looks for material differences between a Pod's current +// desired spec and a revised spec. It merges material changes from revised to +// current. This enables us to minimize updates. It returns true if there +// are material differences between them, or false otherwise. 
+func MergePodSpecUpdates(current *corev1.PodSpec, revised *corev1.PodSpec, name string) bool { + scopedLog := log.WithName("MergePodUpdates").WithValues("name", name) + result := false + + // check for changes in Affinity + if resources.CompareByMarshall(current.Affinity, revised.Affinity) { + scopedLog.Info("Pod Affinity differs", + "current", current.Affinity, + "revised", revised.Affinity) + current.Affinity = revised.Affinity result = true } - // check Labels - if !reflect.DeepEqual(current.ObjectMeta.Labels, revised.ObjectMeta.Labels) { - scopedLog.Info("Container Labels differ", - "current", current.ObjectMeta.Labels, - "revised", revised.ObjectMeta.Labels) - current.ObjectMeta.Labels = revised.ObjectMeta.Labels + // check for changes in SchedulerName + if current.SchedulerName != revised.SchedulerName { + scopedLog.Info("Pod SchedulerName differs", + "current", current.SchedulerName, + "revised", revised.SchedulerName) + current.SchedulerName = revised.SchedulerName result = true } // check for changes in container images; assume that the ordering is same for pods with > 1 container - if len(current.Spec.Containers) != len(revised.Spec.Containers) { + if len(current.Containers) != len(revised.Containers) { scopedLog.Info("Pod Container counts differ", - "current", len(current.Spec.Containers), - "revised", len(revised.Spec.Containers)) - current.Spec.Containers = revised.Spec.Containers + "current", len(current.Containers), + "revised", len(revised.Containers)) + current.Containers = revised.Containers result = true } else { - for idx := range current.Spec.Containers { + for idx := range current.Containers { // check Image - if current.Spec.Containers[idx].Image != revised.Spec.Containers[idx].Image { + if current.Containers[idx].Image != revised.Containers[idx].Image { scopedLog.Info("Pod Container Images differ", - "current", current.Spec.Containers[idx].Image, - "revised", revised.Spec.Containers[idx].Image) - current.Spec.Containers[idx].Image = 
revised.Spec.Containers[idx].Image + "current", current.Containers[idx].Image, + "revised", revised.Containers[idx].Image) + current.Containers[idx].Image = revised.Containers[idx].Image result = true } // check Ports - if resources.CompareContainerPorts(current.Spec.Containers[idx].Ports, revised.Spec.Containers[idx].Ports) { + if resources.CompareContainerPorts(current.Containers[idx].Ports, revised.Containers[idx].Ports) { scopedLog.Info("Pod Container Ports differ", - "current", current.Spec.Containers[idx].Ports, - "revised", revised.Spec.Containers[idx].Ports) - current.Spec.Containers[idx].Ports = revised.Spec.Containers[idx].Ports + "current", current.Containers[idx].Ports, + "revised", revised.Containers[idx].Ports) + current.Containers[idx].Ports = revised.Containers[idx].Ports result = true } // check VolumeMounts - if resources.CompareVolumeMounts(current.Spec.Containers[idx].VolumeMounts, revised.Spec.Containers[idx].VolumeMounts) { + if resources.CompareVolumeMounts(current.Containers[idx].VolumeMounts, revised.Containers[idx].VolumeMounts) { scopedLog.Info("Pod Container VolumeMounts differ", - "current", current.Spec.Containers[idx].VolumeMounts, - "revised", revised.Spec.Containers[idx].VolumeMounts) - current.Spec.Containers[idx].VolumeMounts = revised.Spec.Containers[idx].VolumeMounts + "current", current.Containers[idx].VolumeMounts, + "revised", revised.Containers[idx].VolumeMounts) + current.Containers[idx].VolumeMounts = revised.Containers[idx].VolumeMounts result = true } // check Resources - if resources.CompareByMarshall(¤t.Spec.Containers[idx].Resources, &revised.Spec.Containers[idx].Resources) { + if resources.CompareByMarshall(¤t.Containers[idx].Resources, &revised.Containers[idx].Resources) { scopedLog.Info("Pod Container Resources differ", - "current", current.Spec.Containers[idx].Resources, - "revised", revised.Spec.Containers[idx].Resources) - current.Spec.Containers[idx].Resources = revised.Spec.Containers[idx].Resources + 
"current", current.Containers[idx].Resources, + "revised", revised.Containers[idx].Resources) + current.Containers[idx].Resources = revised.Containers[idx].Resources result = true } } diff --git a/pkg/splunk/reconcile/util_test.go b/pkg/splunk/reconcile/util_test.go index 223485dbd..96076ac15 100644 --- a/pkg/splunk/reconcile/util_test.go +++ b/pkg/splunk/reconcile/util_test.go @@ -12,10 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package deploy +package reconcile import ( "context" + "errors" "fmt" "reflect" "testing" @@ -36,20 +37,24 @@ func copyResource(dst runtime.Object, src runtime.Object) { *dst.(*corev1.ConfigMap) = *src.(*corev1.ConfigMap) case *corev1.Secret: *dst.(*corev1.Secret) = *src.(*corev1.Secret) + case *corev1.PersistentVolumeClaim: + *dst.(*corev1.PersistentVolumeClaim) = *src.(*corev1.PersistentVolumeClaim) case *corev1.PersistentVolumeClaimList: *dst.(*corev1.PersistentVolumeClaimList) = *src.(*corev1.PersistentVolumeClaimList) case *corev1.Service: *dst.(*corev1.Service) = *src.(*corev1.Service) + case *corev1.Pod: + *dst.(*corev1.Pod) = *src.(*corev1.Pod) case *appsv1.Deployment: *dst.(*appsv1.Deployment) = *src.(*appsv1.Deployment) case *appsv1.StatefulSet: *dst.(*appsv1.StatefulSet) = *src.(*appsv1.StatefulSet) - case *enterprisev1.Indexer: - *dst.(*enterprisev1.Indexer) = *src.(*enterprisev1.Indexer) + case *enterprisev1.IndexerCluster: + *dst.(*enterprisev1.IndexerCluster) = *src.(*enterprisev1.IndexerCluster) case *enterprisev1.LicenseMaster: *dst.(*enterprisev1.LicenseMaster) = *src.(*enterprisev1.LicenseMaster) - case *enterprisev1.SearchHead: - *dst.(*enterprisev1.SearchHead) = *src.(*enterprisev1.SearchHead) + case *enterprisev1.SearchHeadCluster: + *dst.(*enterprisev1.SearchHeadCluster) = *src.(*enterprisev1.SearchHeadCluster) case *enterprisev1.Spark: *dst.(*enterprisev1.Spark) = *src.(*enterprisev1.Spark) case *enterprisev1.Standalone: @@ -124,6 +129,9 @@ 
type mockClient struct { // calls is a record of all mockClient function calls calls map[string][]mockFuncCall + + // error returned when an object is not found + notFoundError error } // Get returns mock client's err field @@ -138,7 +146,7 @@ func (c mockClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.O copyResource(obj, getObj.(runtime.Object)) return nil } - return fmt.Errorf("NotFound") + return c.notFoundError } // List returns mock client's err field @@ -153,7 +161,7 @@ func (c mockClient) List(ctx context.Context, obj runtime.Object, opts ...client copyResource(obj, listObj.(runtime.Object)) return nil } - return fmt.Errorf("NotFound") + return c.notFoundError } // Create returns mock client's err field @@ -214,6 +222,11 @@ func (c *mockClient) resetCalls() { c.calls = make(map[string][]mockFuncCall) } +// resetState resets the state of the mockClient +func (c *mockClient) resetState() { + c.state = make(map[string]interface{}) +} + // checkCalls verifies that the wanted function calls were performed func (c *mockClient) checkCalls(t *testing.T, testname string, wantCalls map[string][]mockFuncCall) { notEmptyWantCalls := 0 @@ -258,15 +271,16 @@ func (c *mockClient) checkCalls(t *testing.T, testname string, wantCalls map[str } if notEmptyWantCalls != len(c.calls) { - t.Errorf("%s: MockClient functions called = %d; want %d", testname, len(c.calls), len(wantCalls)) + t.Errorf("%s: MockClient functions called = %d; want %d: calls=%v", testname, len(c.calls), len(wantCalls), c.calls) } } // newMockClient is used to create and initialize a new mock client func newMockClient() *mockClient { c := &mockClient{ - state: make(map[string]interface{}), - calls: make(map[string][]mockFuncCall), + state: make(map[string]interface{}), + calls: make(map[string][]mockFuncCall), + notFoundError: errors.New("NotFound"), } return c } @@ -284,35 +298,30 @@ func reconcileTester(t *testing.T, method string, } // test create new + methodPlus := 
fmt.Sprintf("%s(create)", method) err := reconcile(c, current) if err != nil { - t.Errorf("%s() returned %v; want nil", method, err) + t.Errorf("%s returned %v; want nil", methodPlus, err) } - c.checkCalls(t, method, createCalls) + c.checkCalls(t, methodPlus, createCalls) // test no updates required for current + methodPlus = fmt.Sprintf("%s(update-no-change)", method) c.resetCalls() err = reconcile(c, current) if err != nil { - t.Errorf("%s() returned %v; want nil", method, err) + t.Errorf("%s returned %v; want nil", methodPlus, err) } - c.checkCalls(t, method, map[string][]mockFuncCall{"Get": createCalls["Get"]}) + c.checkCalls(t, methodPlus, map[string][]mockFuncCall{"Get": createCalls["Get"]}) // test updates required + methodPlus = fmt.Sprintf("%s(update-with-change)", method) c.resetCalls() err = reconcile(c, revised) if err != nil { - t.Errorf("%s() returned %v; want nil", method, err) - } - c.checkCalls(t, method, updateCalls) - - // test no updates required for revised - c.resetCalls() - err = reconcile(c, revised) - if err != nil { - t.Errorf("%s() returned %v; want nil", method, err) + t.Errorf("%s returned %v; want nil", methodPlus, err) } - c.checkCalls(t, method, map[string][]mockFuncCall{"Get": updateCalls["Get"]}) + c.checkCalls(t, methodPlus, updateCalls) } func TestCreateResource(t *testing.T) { diff --git a/pkg/splunk/spark/names.go b/pkg/splunk/spark/names.go index a135a7b92..2bdcb974f 100644 --- a/pkg/splunk/spark/names.go +++ b/pkg/splunk/spark/names.go @@ -56,7 +56,7 @@ func GetSparkImage(specImage string) string { if specImage != "" { name = specImage } else { - name = os.Getenv("SPARK_IMAGE") + name = os.Getenv("RELATED_IMAGE_SPLUNK_SPARK") if name == "" { name = defaultSparkImage } diff --git a/pkg/splunk/spark/names_test.go b/pkg/splunk/spark/names_test.go index 0656a96fc..ab946338d 100644 --- a/pkg/splunk/spark/names_test.go +++ b/pkg/splunk/spark/names_test.go @@ -60,7 +60,7 @@ func TestGetSparkImage(t *testing.T) { test("splunk/spark") 
- os.Setenv("SPARK_IMAGE", "splunk-test/spark") + os.Setenv("RELATED_IMAGE_SPLUNK_SPARK", "splunk-test/spark") test("splunk-test/spark") specImage = "splunk/spark-test" diff --git a/pkg/splunk/test/client.go b/pkg/splunk/test/client.go new file mode 100644 index 000000000..796650492 --- /dev/null +++ b/pkg/splunk/test/client.go @@ -0,0 +1,95 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "errors" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" +) + +// MockHTTPHandler is used to handle an HTTP request for a given URL +type MockHTTPHandler struct { + Method string + URL string + Status int + Err error + Body string +} + +// MockHTTPClient is used to replicate an http.Client for unit tests +type MockHTTPClient struct { + WantRequests []*http.Request + GotRequests []*http.Request + Handlers map[string]MockHTTPHandler +} + +// getHandlerKey method for MockHTTPClient returns map key for a HTTP request +func (c *MockHTTPClient) getHandlerKey(req *http.Request) string { + return fmt.Sprintf("%s %s", req.Method, req.URL.String()) +} + +// Do method for MockHTTPClient just tracks the requests that it receives +func (c *MockHTTPClient) Do(req *http.Request) (*http.Response, error) { + c.GotRequests = append(c.GotRequests, req) + rsp, ok := c.Handlers[c.getHandlerKey(req)] + if !ok { + return nil, errors.New("NotFound") + } + httpResponse := 
http.Response{ + StatusCode: rsp.Status, + Body: ioutil.NopCloser(strings.NewReader(rsp.Body)), + } + return &httpResponse, rsp.Err +} + +// AddHandler method for MockHTTPClient adds a wanted request and response to use for it +func (c *MockHTTPClient) AddHandler(req *http.Request, status int, body string, err error) { + c.WantRequests = append(c.WantRequests, req) + if c.Handlers == nil { + c.Handlers = make(map[string]MockHTTPHandler) + } + c.Handlers[c.getHandlerKey(req)] = MockHTTPHandler{ + Method: req.Method, + URL: req.URL.String(), + Status: status, + Err: err, + Body: body, + } +} + +// AddHandlers method for MockHTTPClient adds a wanted requests and responses +func (c *MockHTTPClient) AddHandlers(handlers ...MockHTTPHandler) { + for n := range handlers { + req, _ := http.NewRequest(handlers[n].Method, handlers[n].URL, nil) + c.AddHandler(req, handlers[n].Status, handlers[n].Body, handlers[n].Err) + } +} + +// CheckRequests method for MockHTTPClient checks if requests received matches requests that we want +func (c *MockHTTPClient) CheckRequests(t *testing.T, testMethod string) { + if len(c.GotRequests) != len(c.WantRequests) { + t.Fatalf("%s got %d Requests; want %d", testMethod, len(c.GotRequests), len(c.WantRequests)) + } + for n := range c.GotRequests { + if !reflect.DeepEqual(c.GotRequests[n].URL.String(), c.WantRequests[n].URL.String()) { + t.Errorf("%s GotRequests[%d]=%v; want %v", testMethod, n, c.GotRequests[n].URL.String(), c.WantRequests[n].URL.String()) + } + } +} diff --git a/pkg/splunk/test/doc.go b/pkg/splunk/test/doc.go new file mode 100644 index 000000000..01dec0154 --- /dev/null +++ b/pkg/splunk/test/doc.go @@ -0,0 +1,18 @@ +// Copyright (c) 2018-2020 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package test includes common code used for testing other modules. +*/ +package test