diff --git a/.circleci/config.yml b/.circleci/config.yml index 6b7336ab3..5e8718c93 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -106,7 +106,7 @@ executors: # Set cluster workers to 5 (5 cluster nodes) # NUM_NODES represent number of parallel test executions # NUM_WORKERS represent number of nodes in a k8 cluster - NUM_NODES: 5 + NUM_NODES: 3 NUM_WORKERS: 8 CLUSTER_NAME: eks-integration-test-cluster ENTERPRISE_IMAGE_NAME: splunk/splunk:edge @@ -433,4 +433,4 @@ jobs: name: Delete cluster command: | make cluster-down - no_output_timeout: 30m + no_output_timeout: 30m \ No newline at end of file diff --git a/build/make_bundle.sh b/build/make_bundle.sh index ba5dca396..8aa64a0e0 100755 --- a/build/make_bundle.sh +++ b/build/make_bundle.sh @@ -5,13 +5,14 @@ set -e VERSION=`grep "Version.*=.*\".*\"" version/version.go | sed "s,.*Version.*=.*\"\(.*\)\".*,\1,"` -OLD_VERSIONS="v1beta1 v1alpha3 v1alpha2" +OLD_VERSIONS="v1 v1beta1 v1alpha3 v1alpha2" DOCKER_IO_PATH="docker.io/splunk" REDHAT_REGISTRY_PATH="registry.connect.redhat.com/splunk" OPERATOR_IMAGE="$DOCKER_IO_PATH/splunk-operator:${VERSION}" OLM_CATALOG=deploy/olm-catalog OLM_CERTIFIED=deploy/olm-certified YAML_SCRIPT_FILE=.yq_script.yaml +CRDS_PATH="deploy/crds" # create yq template to append older CRD versions rm -f $YAML_SCRIPT_FILE @@ -90,7 +91,7 @@ cat << EOF >$YAML_SCRIPT_FILE path: metadata.annotations.alm-examples value: |- [{ - "apiVersion": "enterprise.splunk.com/v1", + "apiVersion": "enterprise.splunk.com/v2", "kind": "IndexerCluster", "metadata": { "name": "example", @@ -101,7 +102,7 @@ cat << EOF >$YAML_SCRIPT_FILE } }, { - "apiVersion": "enterprise.splunk.com/v1", + "apiVersion": "enterprise.splunk.com/v2", "kind": "LicenseMaster", "metadata": { "name": "example", @@ -110,7 +111,7 @@ cat << EOF >$YAML_SCRIPT_FILE "spec": {} }, { - "apiVersion": "enterprise.splunk.com/v1", + "apiVersion": "enterprise.splunk.com/v2", "kind": "SearchHeadCluster", "metadata": { "name": "example", @@ -121,7 +122,7 @@ cat << EOF >$YAML_SCRIPT_FILE } }, { - "apiVersion": "enterprise.splunk.com/v1", + "apiVersion": "enterprise.splunk.com/v2", "kind": "Standalone", "metadata": { "name": "example", @@ -145,3 +146,22 @@ yq w $OLM_CATALOG/splunk/splunk.package.yaml packageName "splunk-certified" > $O # Mac OS expects sed -i '', Linux expects sed -i''. To workaround this, using .bak zip $OLM_CERTIFIED/splunk.zip -j $OLM_CERTIFIED/splunk $OLM_CERTIFIED/splunk/* + +# This adds the 'protocol' field back to the CRDs, when we try to run make package or make generate. +# NOTE: This is a temporary fix and should not be needed in future operator-sdk upgrades. 
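+# How updateCRDS works (a sketch; it assumes exactly one
+# "x-kubernetes-list-map-keys" match per CRD file): grep -n captures that
+# entry's line number, the script steps back two lines, and awk re-emits the
+# file, printing an extra "- protocol" item immediately before that line so
+# the protocol key is restored to the generated schema.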
+function updateCRDS { + for crd in `ls $1` + do + echo Updating crd: $crd + line_num=`grep -n "x-kubernetes-list-map-keys" $1/$crd | awk -F ":" '{print$1}'` + line_num=$(($line_num-2)) + awk 'NR==v1{print " - protocol"}1' v1="${line_num}" $1/$crd > tmp.out + mv tmp.out $1/$crd + done +} + +echo Updating $CRDS_PATH +updateCRDS $CRDS_PATH + +echo Updating $OLM_CATALOG/splunk/$VERSION +updateCRDS $OLM_CATALOG/splunk/$VERSION diff --git a/deploy/crds/enterprise.splunk.com_clustermasters_crd.yaml b/deploy/crds/enterprise.splunk.com_clustermasters_crd.yaml index 8d2c968be..a37c7cfe7 100644 --- a/deploy/crds/enterprise.splunk.com_clustermasters_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_clustermasters_crd.yaml @@ -18,7 +18,7 @@ spec: jsonPath: .status.phase name: Phase type: string - name: v1 + name: v2 schema: openAPIV3Schema: description: ClusterMaster is the Schema for the clustermasters API @@ -633,6 +633,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -856,6 +934,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer resources: description: resource requirements for the pod containers properties: @@ -1248,8 +1340,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1260,9 +1351,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string type: object type: array type: object @@ -2534,6 +2632,142 @@ spec: status: description: ClusterMasterStatus defines the observed state of ClusterMaster properties: + appContext: + description: App Framework status + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object bundlePushInfo: description: Bundle push status tracker properties: @@ -2648,8 +2882,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2660,9 +2893,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string type: object type: array type: object @@ -2672,6 +2912,15 @@ spec: storage: true subresources: status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string - name: v1beta1 served: true storage: false diff --git a/deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml b/deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml index a45fa2b16..d384c7c82 100644 --- a/deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_indexerclusters_crd.yaml @@ -35,7 +35,7 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1 + name: v2 schema: openAPIV3Schema: description: IndexerCluster is the Schema for a Splunk Enterprise indexer @@ -875,6 +875,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer replicas: description: Number of search head pods; a search head cluster will be created if > 1 @@ -2555,6 +2569,15 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string - name: v1beta1 served: true storage: false diff --git a/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml index e0e619471..2bb45dcbc 100644 --- a/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_licensemasters_crd.yaml @@ -22,7 +22,7 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1 + name: v2 schema: openAPIV3Schema: description: LicenseMaster is the Schema for a Splunk Enterprise license master. @@ -638,6 +638,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. 
The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -861,6 +939,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer resources: description: resource requirements for the pod containers properties: @@ -2436,6 +2528,142 @@ spec: description: LicenseMasterStatus defines the observed state of a Splunk Enterprise license master. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. 
The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. 
+ format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the license master enum: @@ -2453,6 +2681,15 @@ spec: storage: true subresources: status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string - name: v1beta1 served: true storage: false diff --git a/deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml b/deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml index 7c7ef230b..64798ec92 100644 --- a/deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -34,7 +34,7 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1 + name: v2 schema: openAPIV3Schema: description: SearchHeadCluster is the Schema for a Splunk Enterprise search @@ -651,6 +651,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -874,6 +952,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer replicas: description: Number of search head pods; a search head cluster will be created if > 1 @@ -2465,6 +2557,142 @@ spec: items: type: boolean type: array + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object captain: description: name or label of the search head captain type: string @@ -2562,6 +2790,15 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string - name: v1beta1 served: true storage: false diff --git a/deploy/crds/enterprise.splunk.com_standalones_crd.yaml b/deploy/crds/enterprise.splunk.com_standalones_crd.yaml index fc477e27b..e31b397ac 100644 --- a/deploy/crds/enterprise.splunk.com_standalones_crd.yaml +++ b/deploy/crds/enterprise.splunk.com_standalones_crd.yaml @@ -30,7 +30,7 @@ spec: jsonPath: .metadata.creationTimestamp name: Age type: date - name: v1 + name: v2 schema: openAPIV3Schema: description: Standalone is the Schema for a Splunk Enterprise standalone instances. @@ -646,6 +646,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. 
If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -869,6 +947,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer replicas: description: Number of standalone pods format: int32 @@ -1265,8 +1357,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1277,9 +1368,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string type: object type: array type: object @@ -2552,6 +2650,142 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. 
Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. 
+ format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the standalone instances enum: @@ -2665,8 +2899,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2677,9 +2910,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string type: object type: array type: object @@ -2693,6 +2933,15 @@ spec: specReplicasPath: .spec.replicas statusReplicasPath: .status.replicas status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string - name: v1beta1 served: true storage: false diff --git a/deploy/examples/advanced/c1.yaml b/deploy/examples/advanced/c1.yaml index eac61f624..ba638f0f4 100644 --- a/deploy/examples/advanced/c1.yaml +++ b/deploy/examples/advanced/c1.yaml @@ -1,4 +1,4 @@ -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: LicenseMaster metadata: name: lm-example @@ -11,7 +11,7 @@ spec: name: splunk-licenses licenseUrl: /mnt/licenses/enterprise.lic --- -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: ClusterMaster metadata: name: cm-example @@ -42,20 +42,20 @@ spec: apps_location: - "/mnt/apps/.tgz" --- -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: IndexerCluster metadata: name: idxc-example finalizers: - enterprise.splunk.com/delete-pvc spec: - replicas: + replicas: 4 clusterMasterRef: name: cm-example licenseMasterRef: name: lm-example --- -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: sh-example diff --git a/deploy/examples/clustermaster/default.yaml b/deploy/examples/clustermaster/default.yaml index bc7f7f870..1a4e846ea 100644 --- a/deploy/examples/clustermaster/default.yaml +++ b/deploy/examples/clustermaster/default.yaml @@ -1,5 +1,5 @@ -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: ClusterMaster metadata: name: test -spec: {} \ No newline at end of file +spec: {} diff --git a/deploy/examples/indexercluster/default.yaml b/deploy/examples/indexercluster/default.yaml index a84688357..5e34cc716 100644 --- a/deploy/examples/indexercluster/default.yaml +++ b/deploy/examples/indexercluster/default.yaml @@ -1,4 +1,4 @@ -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: IndexerCluster metadata: name: test diff --git a/deploy/examples/licensemaster/default.yaml b/deploy/examples/licensemaster/default.yaml index 5014739b0..d8f439c03 100644 --- a/deploy/examples/licensemaster/default.yaml +++ b/deploy/examples/licensemaster/default.yaml @@ -1,4 +1,4 @@ -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: LicenseMaster metadata: name: test diff --git a/deploy/examples/searchheadcluster/default.yaml b/deploy/examples/searchheadcluster/default.yaml index c127b373c..01b8876ff 100644 --- a/deploy/examples/searchheadcluster/default.yaml +++ b/deploy/examples/searchheadcluster/default.yaml @@ -1,4 +1,4 
@@ -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: SearchHeadCluster metadata: name: test diff --git a/deploy/examples/standalone/default.yaml b/deploy/examples/standalone/default.yaml index f85a63ed6..60b19112d 100644 --- a/deploy/examples/standalone/default.yaml +++ b/deploy/examples/standalone/default.yaml @@ -1,4 +1,4 @@ -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: test diff --git a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_clustermasters_crd.yaml b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_clustermasters_crd.yaml index c194ad2ed..ab47ed10d 100644 --- a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_clustermasters_crd.yaml +++ b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_clustermasters_crd.yaml @@ -614,6 +614,39 @@ spec: type: array type: object type: object + appFrameworkRef: + description: App Framework configuration. Refers to the config block + for App Framework. Through this config, apps can be installed in an + Indexer Cluster. The implementatiom is still TBD. + properties: + featureEnabled: + description: Flag to Enable/Disable Application Framework Feature. + Default value is False. Implementation of Apps Framework is still + TBD so turning this flag to true will not do any changes. + type: boolean + s3Bucket: + description: App Package Remote Store Bucket Name of the s3 bucket + within s3Endpoint where splunk apps packages can be placed. + type: string + s3Endpoint: + description: App Package Remote Store Endpoint. This is s3 location + where you will have the splunk apps packages placed. + type: string + s3PollInterval: + description: App Package Remote Store Polling interval in minutes. + New or Updated Apps will be pulled from s3 remote location at + every polling interval. This value can be >=1. The default value + is 60 minutes. + type: integer + s3SecretRef: + description: App Package Remote Store Credentials Secret object + containing the S3 authentication info. + type: string + type: + description: App Package Remote Store type. The currently supported + type is s3 only. + type: string + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes diff --git a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_licensemasters_crd.yaml index b2fe9ab56..3e63ce98e 100644 --- a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_licensemasters_crd.yaml +++ b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_licensemasters_crd.yaml @@ -619,6 +619,39 @@ spec: type: array type: object type: object + appFrameworkRef: + description: App Framework configuration. Refers to the config block + for App Framework. Through this config, apps can be installed in a + LicenseMaster splunk CR. The implementatiom is still TBD. + properties: + featureEnabled: + description: Flag to Enable/Disable Application Framework Feature. + Default value is False. Implementation of Apps Framework is still + TBD so turning this flag to true will not do any changes. + type: boolean + s3Bucket: + description: App Package Remote Store Bucket Name of the s3 bucket + within s3Endpoint where splunk apps packages can be placed. + type: string + s3Endpoint: + description: App Package Remote Store Endpoint. This is s3 location + where you will have the splunk apps packages placed. 
+ type: string + s3PollInterval: + description: App Package Remote Store Polling interval in minutes. + New or Updated Apps will be pulled from s3 remote location at + every polling interval. This value can be >=1. The default value + is 60 minutes. + type: integer + s3SecretRef: + description: App Package Remote Store Credentials Secret object + containing the S3 authentication info. + type: string + type: + description: App Package Remote Store type. The currently supported + type is s3 only. + type: string + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes diff --git a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_searchheadclusters_crd.yaml b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_searchheadclusters_crd.yaml index 4210949c2..cc690e802 100644 --- a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_searchheadclusters_crd.yaml +++ b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -636,6 +636,39 @@ spec: type: array type: object type: object + appFrameworkRef: + description: App Framework configuration. Refers to the config block + for App Framework. Through this config, apps can be installed in a + standalone splunk CR. The implementatiom is still TBD. + properties: + featureEnabled: + description: Flag to Enable/Disable Application Framework Feature. + Default value is False. Implementation of Apps Framework is still + TBD so turning this flag to true will not do any changes. + type: boolean + s3Bucket: + description: App Package Remote Store Bucket Name of the s3 bucket + within s3Endpoint where splunk apps packages can be placed. + type: string + s3Endpoint: + description: App Package Remote Store Endpoint. This is s3 location + where you will have the splunk apps packages placed. + type: string + s3PollInterval: + description: App Package Remote Store Polling interval in minutes. + New or Updated Apps will be pulled from s3 remote location at + every polling interval. This value can be >=1. The default value + is 60 minutes. + type: integer + s3SecretRef: + description: App Package Remote Store Credentials Secret object + containing the S3 authentication info. + type: string + type: + description: App Package Remote Store type. The currently supported + type is s3 only. + type: string + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes diff --git a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_standalones_crd.yaml b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_standalones_crd.yaml index ec058bd40..a109f7c1f 100644 --- a/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_standalones_crd.yaml +++ b/deploy/olm-catalog/splunk/0.2.1/enterprise.splunk.com_standalones_crd.yaml @@ -631,6 +631,39 @@ spec: type: array type: object type: object + appFrameworkRef: + description: App Framework configuration. Refers to the config block + for App Framework. Through this config, apps can be installed in a + standalone splunk CR. The implementatiom is still TBD. + properties: + featureEnabled: + description: Flag to Enable/Disable Application Framework Feature. + Default value is False. Implementation of Apps Framework is still + TBD so turning this flag to true will not do any changes. 
+ type: boolean + s3Bucket: + description: App Package Remote Store Bucket Name of the s3 bucket + within s3Endpoint where splunk apps packages can be placed. + type: string + s3Endpoint: + description: App Package Remote Store Endpoint. This is s3 location + where you will have the splunk apps packages placed. + type: string + s3PollInterval: + description: App Package Remote Store Polling interval in minutes. + New or Updated Apps will be pulled from s3 remote location at + every polling interval. This value can be >=1. The default value + is 60 minutes. + type: integer + s3SecretRef: + description: App Package Remote Store Credentials Secret object + containing the S3 authentication info. + type: string + type: + description: App Package Remote Store type. The currently supported + type is s3 only. + type: string + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes diff --git a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_clustermasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_clustermasters_crd.yaml index 322731b0b..ae1500d19 100644 --- a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_clustermasters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_clustermasters_crd.yaml @@ -614,6 +614,72 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in this + location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes + type: integer + defaults: + description: Defines default configuration settings for App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. Scope + determines whether the App(s) is/are installed locally or + cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. 
+ type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1080,8 +1146,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1095,6 +1160,9 @@ spec: secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. + type: string type: object type: array type: object @@ -2335,6 +2403,84 @@ spec: status: description: ClusterMasterStatus defines the observed state of ClusterMaster properties: + appContext: + description: App Framework status + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes + type: integer + defaults: + description: Defines default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the App deployment + in progress + type: boolean + version: + description: App Framework version info for future use + type: integer + type: object bundlePushInfo: description: Bundle push status tracker properties: @@ -2448,8 +2594,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2463,6 +2608,9 @@ spec: secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. 
+ type: string type: object type: array type: object diff --git a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_licensemasters_crd.yaml index 864945feb..cf91db07a 100644 --- a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_licensemasters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_licensemasters_crd.yaml @@ -619,6 +619,72 @@ spec: type: array type: object type: object + appRepo: + description: Splunk enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in this + location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes + type: integer + defaults: + description: Defines default configuration settings for App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. Scope + determines whether the App(s) is/are installed locally or + cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -2238,6 +2304,84 @@ spec: description: LicenseMasterStatus defines the observed state of a Splunk Enterprise license master. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes + type: integer + defaults: + description: Defines default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the App deployment + in progress + type: boolean + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the license master enum: diff --git a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_searchheadclusters_crd.yaml b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_searchheadclusters_crd.yaml index 4ca5f8f7f..1ff08b771 100644 --- a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_searchheadclusters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -636,6 +636,72 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in this + location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes + type: integer + defaults: + description: Defines default configuration settings for App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
Scope + determines whether the App(s) is/are installed locally or + cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -2271,6 +2337,84 @@ spec: items: type: boolean type: array + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes + type: integer + defaults: + description: Defines default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the App deployment + in progress + type: boolean + version: + description: App Framework version info for future use + type: integer + type: object captain: description: name or label of the search head captain type: string diff --git a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_standalones_crd.yaml b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_standalones_crd.yaml index dad63276f..c959b0a5c 100644 --- a/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_standalones_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0-RC/enterprise.splunk.com_standalones_crd.yaml @@ -631,6 +631,72 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. 
Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in this + location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes + type: integer + defaults: + description: Defines default configuration settings for App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. Scope + determines whether the App(s) is/are installed locally or + cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1101,8 +1167,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1116,6 +1181,9 @@ spec: secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. + type: string type: object type: array type: object @@ -2357,6 +2425,84 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes + type: integer + defaults: + description: Defines default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the App deployment + is in progress + type: boolean + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the standalone instances enum: @@ -2469,8 +2615,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2484,6 +2629,9 @@ spec: secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. + type: string type: object type: array type: object diff --git a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_clustermasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_clustermasters_crd.yaml index 90dc7d61b..3d77206f8 100644 --- a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_clustermasters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_clustermasters_crd.yaml @@ -633,6 +633,85 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines a list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on the following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider, e.g. + aws, azure, minio. Currently we only support + aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1247,8 +1326,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1259,9 +1337,17 @@ spec: path: description: Remote volume path type: string + provider: + description: App Package Remote Store provider, e.g. + aws, azure, minio. Currently we only support + aws. + type: string secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. + type: string type: object type: array type: object @@ -2533,6 +2619,142 @@ spec: status: description: ClusterMasterStatus defines the observed state of ClusterMaster properties: + appContext: + description: App Framework status + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines a list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + the following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day.
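A minimal sketch of how these appRepo fields might be populated in a ClusterMaster spec, assuming an S3-backed repository; the bucket, path, endpoint, secret name, and interval below are invented for illustration:

appRepo:
  appsRepoPollIntervalSeconds: 900   # kept within [60, 86400]; 0 or unset falls back to 3600
  defaults:
    volumeName: volume_app_repo
    scope: cluster
  appSources:
    - name: clusterApps
      location: clusterAppsLoc/
  volumes:
    - name: volume_app_repo
      storageType: s3
      provider: aws
      path: app-framework-bucket/clusterAppsRepo
      endpoint: https://s3-us-west-2.amazonaws.com
      secretRef: s3-secret

An appSource that omits scope or volumeName inherits them from the defaults block.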
+ format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider, e.g. + aws, azure, minio. Currently we only + support aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + a list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents information + for a single App deployment + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represents the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. This is introduced here so that we don't do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object bundlePushInfo: description: Bundle push status tracker properties: @@ -2647,8 +2869,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2659,9 +2880,17 @@ spec: path: description: Remote volume path type: string + provider: + description: App Package Remote Store provider, e.g. + aws, azure, minio. Currently we only support + aws. + type: string secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type.
+ type: string type: object type: array type: object diff --git a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_licensemasters_crd.yaml index bcd23291e..b0b359fcf 100644 --- a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_licensemasters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_licensemasters_crd.yaml @@ -638,6 +638,85 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines a list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on the following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider, e.g. + aws, azure, minio. Currently we only support + aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -2435,6 +2514,142 @@ spec: description: LicenseMasterStatus defines the observed state of a Splunk Enterprise license master.
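A hedged sketch of how the appContext status block defined below might render on a live object. Every value here is invented, and deployStatus/repoState are opaque integer codes (AppDeploymentStatus and AppRepoState) whose concrete values the schema does not enumerate:

status:
  appContext:
    isDeploymentInProgress: false
    appsRepoStatusPollIntervalSeconds: 3600
    appSrcDeployStatus:
      licenseApps:
        appDeploymentInfo:
          - appName: billing_dashboards.tgz
            Size: 24576
            objectHash: 9a0c3d7f
            lastModifiedTime: 'Wed, 14 Apr 2021 10:00:00 GMT'
            deployStatus: 2
            repoState: 1
    version: 1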
properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines a list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + the following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider, e.g. + aws, azure, minio. Currently we only + support aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + a list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents information + for a single App deployment + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represents the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. This is introduced here so that we don't do spec + validation in every reconcile just because the spec and status + are different.
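One plausible reading of that note, shown side by side on a hypothetical object: the spec carries what the user asked for, while the status records the validated value the operator actually runs with, so later reconciles can compare against status instead of re-validating the spec every pass. Numbers invented:

spec:
  appRepo:
    appsRepoPollIntervalSeconds: 10
status:
  appContext:
    appsRepoStatusPollIntervalSeconds: 60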
+ format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the license master enum: diff --git a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_searchheadclusters_crd.yaml b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_searchheadclusters_crd.yaml index fd0605c89..25787f23c 100644 --- a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_searchheadclusters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -651,6 +651,85 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider. For e.g. + aws, azure, minio, etc. Currently we are only supporting + aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. 
+ type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -2464,6 +2543,142 @@ spec: items: type: boolean type: array + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider. For + e.g. aws, azure, minio, etc. Currently we are only + supporting aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. 
+ type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object captain: description: name or label of the search head captain type: string diff --git a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_standalones_crd.yaml b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_standalones_crd.yaml index 449d3719d..4a6bb504f 100644 --- a/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_standalones_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.0/enterprise.splunk.com_standalones_crd.yaml @@ -646,6 +646,85 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider. For e.g. + aws, azure, minio, etc. Currently we are only supporting + aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1264,8 +1343,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1276,9 +1354,17 @@ spec: path: description: Remote volume path type: string + provider: + description: App Package Remote Store provider. For e.g. + aws, azure, minio, etc. Currently we are only supporting + aws. + type: string secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. + type: string type: object type: array type: object @@ -2551,6 +2637,142 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: App Package Remote Store provider. For + e.g. aws, azure, minio, etc. Currently we are only + supporting aws. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the standalone instances enum: @@ -2664,8 +2886,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2676,9 +2897,17 @@ spec: path: description: Remote volume path type: string + provider: + description: App Package Remote Store provider. For e.g. + aws, azure, minio, etc. Currently we are only supporting + aws. + type: string secretRef: description: Secret object name type: string + storageType: + description: Remote Storage type. + type: string type: object type: array type: object diff --git a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_clustermasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_clustermasters_crd.yaml index 90dc7d61b..8dccb0c15 100644 --- a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_clustermasters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_clustermasters_crd.yaml @@ -633,6 +633,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. 
Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -856,6 +934,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer resources: description: resource requirements for the pod containers properties: @@ -1247,8 +1339,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1259,9 +1350,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string type: object type: array type: object @@ -2533,6 +2631,142 @@ spec: status: description: ClusterMasterStatus defines the observed state of ClusterMaster properties: + appContext: + description: App Framework status + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object bundlePushInfo: description: Bundle push status tracker properties: @@ -2647,8 +2881,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2659,9 +2892,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string type: object type: array type: object diff --git a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_indexerclusters_crd.yaml b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_indexerclusters_crd.yaml index a94e40ecf..ff0cef3d9 100644 --- a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_indexerclusters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_indexerclusters_crd.yaml @@ -875,6 +875,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer replicas: description: Number of search head pods; a search head cluster will be created if > 1 diff --git a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_licensemasters_crd.yaml index bcd23291e..5c3b28aab 100644 --- a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_licensemasters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_licensemasters_crd.yaml @@ -638,6 +638,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -861,6 +939,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer resources: description: resource requirements for the pod containers properties: @@ -2435,6 +2527,142 @@ spec: description: LicenseMasterStatus defines the observed state of a Splunk Enterprise license master. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the license master enum: diff --git a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_searchheadclusters_crd.yaml b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_searchheadclusters_crd.yaml index fd0605c89..d2b5c3014 100644 --- a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_searchheadclusters_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -651,6 +651,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -874,6 +952,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer replicas: description: Number of search head pods; a search head cluster will be created if > 1 @@ -2464,6 +2556,142 @@ spec: items: type: boolean type: array + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. 
Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. 
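# --- Editor's sketch (not part of the diff): the appRepo block that this
# change adds to each CRD is easier to read as a concrete custom resource.
# A minimal example, assuming a hypothetical S3 bucket "splunk-app-bucket"
# and a pre-created secret "s3-secret"; every name below is illustrative,
# not taken from this diff.
apiVersion: enterprise.splunk.com/v2
kind: SearchHeadCluster
metadata:
  name: example-shc
spec:
  replicas: 3
  appRepo:
    # per the schema: 0/unset => 3600s; below 60 raised to 60; above 86400 lowered to 86400
    appsRepoPollIntervalSeconds: 3600
    defaults:
      volumeName: volume_app_repo
      scope: cluster                 # "cluster" or "local"
    appSources:
      - name: security-apps          # logical name, unique within this appRepo
        location: security-apps/     # relative to the volume path
    volumes:
      - name: volume_app_repo
        storageType: s3              # only "s3" is supported
        provider: aws                # "aws" or "minio"
        endpoint: https://s3-us-west-2.amazonaws.com
        path: splunk-app-bucket/apps/
        secretRef: s3-secret
# --- end editor's sketch ---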
+ format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object captain: description: name or label of the search head captain type: string diff --git a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_standalones_crd.yaml b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_standalones_crd.yaml index 449d3719d..e0a6ed7a5 100644 --- a/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_standalones_crd.yaml +++ b/deploy/olm-catalog/splunk/1.0.1/enterprise.splunk.com_standalones_crd.yaml @@ -646,6 +646,84 @@ spec: type: array type: object type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string + type: object + type: array + type: object clusterMasterRef: description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -869,6 +947,20 @@ spec: licenseUrl: description: Full path or URL for a Splunk Enterprise license file type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer replicas: description: Number of standalone pods format: int32 @@ -1264,8 +1356,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -1276,9 +1367,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string type: object type: array type: object @@ -2551,6 +2649,142 @@ spec: description: StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object phase: description: current phase of the standalone instances enum: @@ -2664,8 +2898,7 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume name and remote - volume URI + description: VolumeSpec defines remote volume config properties: endpoint: description: Remote volume URI @@ -2676,9 +2909,16 @@ spec: path: description: Remote volume path type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string secretRef: description: Secret object name type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string type: object type: array type: object diff --git a/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_clustermasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_clustermasters_crd.yaml new file mode 100644 index 000000000..a37c7cfe7 --- /dev/null +++ b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_clustermasters_crd.yaml @@ -0,0 +1,2950 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clustermasters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: ClusterMaster + listKind: ClusterMasterList + plural: clustermasters + shortNames: + - cm-idxc + singular: clustermaster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of cluster master + jsonPath: .status.phase + name: Phase + type: string + name: v2 + schema: + openAPIV3Schema: + description: ClusterMaster is the Schema for the clustermasters API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterMasterSpec defines the desired state of ClusterMaster + properties: + Mock: + description: Mock to differentiate between UTs and actual reconcile + type: boolean + affinity: + description: Kubernetes Affinity rules that control how pods are assigned + to particular nodes. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. 
Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
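# --- Editor's sketch (not part of the diff): the affinity schema above is the
# standard Kubernetes affinity API embedded verbatim in the CRD. A minimal
# node-affinity example for the new v2 ClusterMaster; the node label key and
# value are illustrative.
apiVersion: enterprise.splunk.com/v2
kind: ClusterMaster
metadata:
  name: example-cm
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: splunk/node-pool   # hypothetical node label
                operator: In            # In, NotIn, Exists, DoesNotExist, Gt, Lt
                values:
                  - indexing
# --- end editor's sketch ---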
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. 
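# --- Editor's sketch (not part of the diff): a common use of the
# podAntiAffinity block above is to spread Splunk pods across nodes. The
# label selector below is illustrative, not taken from this diff.
apiVersion: enterprise.splunk.com/v2
kind: ClusterMaster
metadata:
  name: example-cm-spread
spec:
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            topologyKey: kubernetes.io/hostname   # empty topologyKey is not allowed
            labelSelector:
              matchLabels:
                app.kubernetes.io/instance: splunk-example-cm   # hypothetical label
# --- end editor's sketch ---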
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object + clusterMasterRef: + description: ClusterMasterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + defaults: + description: Inline map of default.yml overrides used to initialize + the environment + type: string + defaultsUrl: + description: Full path or URL for one or more default.yml files, separated + by commas + type: string + defaultsUrlApps: + description: Full path or URL for one or more defaults.yml files specific + to App install, separated by commas. The defaults listed here will + be installed on the CM, standalone, search head deployer or license + master instance. + type: string + etcVolumeStorageConfig: + description: Storage configuration for /opt/splunk/etc volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + extraEnv: + description: 'ExtraEnv refers to extra environment variables to be + passed to the Splunk instance containers WARNING: Setting environment + variables used by Splunk or Ansible will affect Splunk installation + and operation' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. 
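# --- Editor's sketch (not part of the diff): extraEnv above accepts standard
# Kubernetes EnvVar entries, including valueFrom references. The ConfigMap
# name and keys are illustrative. Note the schema's own warning: setting
# variables consumed by Splunk or Ansible affects installation and operation.
apiVersion: enterprise.splunk.com/v2
kind: ClusterMaster
metadata:
  name: example-cm-env
spec:
  extraEnv:
    - name: HTTP_PROXY                 # plain literal value
      value: http://proxy.example.com:3128
    - name: REGION                     # value resolved from a ConfigMap key
      valueFrom:
        configMapKeyRef:
          name: cluster-settings       # hypothetical ConfigMap
          key: region
          optional: true
# --- end editor's sketch ---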
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE + environment variables) + type: string + imagePullPolicy: + description: 'Sets pull policy for all images (either “Always” or + the default: “IfNotPresent”)' + enum: + - Always + - IfNotPresent + type: string + licenseMasterRef: + description: LicenseMasterRef refers to a Splunk Enterprise license + master managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' 
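# --- Editor's sketch (not part of the diff): tying together image,
# licenseMasterRef, and the livenessInitialDelaySeconds /
# readinessInitialDelaySeconds fields this change introduces (the probe
# fields appear earlier in this diff for the other CRDs, and just below for
# ClusterMaster). All names and values are illustrative; per the field
# descriptions, the Operator may override the delays with higher values.
apiVersion: enterprise.splunk.com/v2
kind: ClusterMaster
metadata:
  name: example-cm-probes
spec:
  image: splunk/splunk:8.1.0           # overrides RELATED_IMAGE_SPLUNK_ENTERPRISE
  imagePullPolicy: IfNotPresent        # "Always" or "IfNotPresent"
  licenseMasterRef:
    name: example-lm                   # LicenseMaster CR in the same namespace
  livenessInitialDelaySeconds: 300     # sample value, not a documented default
  readinessInitialDelaySeconds: 10     # sample value, not a documented default
# --- end editor's sketch ---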
+ type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + licenseUrl: + description: Full path or URL for a Splunk Enterprise license file + type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer + resources: + description: resource requirements for the pod containers + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + schedulerName: + description: Name of Scheduler to use for pod placement (defaults + to “default-scheduler”) + type: string + serviceAccount: + description: ServiceAccount is the service account used by the pods + deployed by the CRD. If not specified uses the default serviceAccount + for the namespace as per https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + type: string + serviceTemplate: + description: ServiceTemplate is a template used to create Kubernetes + services + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. 
Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + clusterIP: + description: 'clusterIP is the IP address of the service and + is usually assigned randomly by the master. If an address + is specified manually and is not in use by others, it will + be allocated to the service; otherwise, creation of the + service will fail. This field can not be changed through + updates. Valid values are "None", empty string (""), or + a valid IP address. "None" can be specified for headless + services when proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. Ignored if + type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + externalIPs: + description: externalIPs is a list of IP addresses for which + nodes in the cluster will also accept traffic for this service. These + IPs are not managed by Kubernetes. The user is responsible + for ensuring that traffic arrives at a node with this IP. A + common example is external load-balancers that are not part + of the Kubernetes system. + items: + type: string + type: array + externalName: + description: externalName is the external reference that kubedns + or equivalent will return as a CNAME record for this service. + No proxying will be involved. Must be a valid RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires Type + to be ExternalName. + type: string + externalTrafficPolicy: + description: externalTrafficPolicy denotes if this Service + desires to route external traffic to node-local or cluster-wide + endpoints. "Local" preserves the client source IP and avoids + a second hop for LoadBalancer and Nodeport type services, + but risks potentially imbalanced traffic spreading. "Cluster" + obscures the client source IP and may cause a second hop + to another node, but should have good overall load-spreading. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. If not specified, HealthCheckNodePort + is created by the service api backend with the allocated + nodePort. Will use user-specified nodePort value if specified + by the client. Only effects when Type is set to LoadBalancer + and ExternalTrafficPolicy is set to Local. + format: int32 + type: integer + ipFamily: + description: ipFamily specifies whether this Service has a + preference for a particular IP family (e.g. IPv4 vs. IPv6). If + a specific IP family is requested, the clusterIP field will + be allocated from that family, if it is available in the + cluster. 
If no IP family is requested, the cluster's primary + IP family will be used. Other IP fields (loadBalancerIP, + loadBalancerSourceRanges, externalIPs) and controllers which + allocate external load-balancers should use the same IP + family. Endpoints for this Service will be of this family. This + field is immutable after creation. Assigning a ServiceIPFamily + not available in the cluster (e.g. IPv6 in IPv4 only cluster) + is an error condition and will fail during clusterIP assignment. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer LoadBalancer + will get created with the IP specified in this field. This + feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider load-balancer + will be restricted to the specified client IPs. This field + will be ignored if the cloud-provider does not support the + feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this port. + This field follows standard Kubernetes label syntax. + Un-prefixed names are reserved for IANA standard service + names (as per RFC-6335 and http://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names such + as mycompany.com/my-custom-protocol. Field can be + enabled with ServiceAppProtocol feature gate. + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field in + the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this service + is exposed when type=NodePort or LoadBalancer. Usually + assigned by the system. If specified, it will be allocated + to the service if unused or else creation of the service + will fail. Default is to auto-allocate a port if the + ServiceType of this Service requires one. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access on + the pods targeted by the service. Number must be in + the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named + port in the target Pod''s container ports. If this + is not specified, the value of the ''port'' field + is used (an identity map). 
This field is ignored for + services with clusterIP=None, and should be omitted + or set equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses, when set to true, indicates + that DNS implementations must publish the notReadyAddresses + of subsets for the Endpoints associated with the Service. + The default value is false. The primary use case for setting + this field is to use a StatefulSet's Headless Service to + propagate SRV records for its Pods without respect to their + readiness for purpose of peer discovery. + type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label keys + and values matching this selector. If empty or not present, + the service is assumed to have an external process managing + its endpoints, which Kubernetes will not modify. Only applies + to types ClusterIP, NodePort, and LoadBalancer. Ignored + if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + topologyKeys: + description: topologyKeys is a preference-order list of topology + keys which implementations of services should use to preferentially + sort endpoints when accessing this Service, it can not be + used at the same time as externalTrafficPolicy=Local. Topology + keys must be valid label keys and at most 16 keys may be + specified. Endpoints are chosen based on the first topology + key with available backends. If this field is specified + and all entries have no backends that match the topology + of the client, the service has no backends for that client + and connections should fail. The special value "*" may be + used to mean "any topology". This catch-all value, if used, + only makes sense as the last value in the list. If this + is not specified or empty, no topology constraints will + be applied. + items: + type: string + type: array + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, + NodePort, and LoadBalancer. "ExternalName" maps to the specified + externalName. "ClusterIP" allocates a cluster-internal IP + address for load-balancing to endpoints. Endpoints are determined + by the selector or if that is not specified, by manual construction + of an Endpoints object. 
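+          # Illustrative sketch only, not part of the generated schema: assuming
+          # this Service schema backs the operator's serviceTemplate field, each
+          # ports entry supplies both 'port' and 'protocol', since the list is
+          # keyed on that pair (see the x-kubernetes-list-map-keys above). The
+          # port number 8000 is an assumed example value.
+          #
+          #   serviceTemplate:
+          #     spec:
+          #       type: LoadBalancer
+          #       ports:
+          #         - name: splunkweb
+          #           port: 8000
+          #           protocol: TCP
+          #           targetPort: 8000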
If clusterIP is "None", no virtual + IP is allocated and the endpoints are published as a set + of endpoints rather than a stable IP. "NodePort" builds + on ClusterIP and allocates a port on every node which routes + to the clusterIP. "LoadBalancer" builds on NodePort and + creates an external load-balancer (if supported in the current + cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + status: + description: 'Most recently observed status of the service. Populated + by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + loadBalancer: + description: LoadBalancer contains the current status of the + load-balancer, if one is present. + properties: + ingress: + description: Ingress is a list containing ingress points + for the load-balancer. Traffic intended for the service + should be sent to these ingress points. + items: + description: 'LoadBalancerIngress represents the status + of a load-balancer ingress point: traffic intended + for the service should be sent to an ingress point.' + properties: + hostname: + description: Hostname is set for load-balancer ingress + points that are DNS based (typically AWS load-balancers) + type: string + ip: + description: IP is set for load-balancer ingress + points that are IP based (typically GCE or OpenStack + load-balancers) + type: string + type: object + type: array + type: object + type: object + type: object + smartstore: + description: Splunk Smartstore configuration. Refer to indexes.conf.spec + and server.conf.spec on docs.splunk.com + properties: + cacheManager: + description: Defines Cache manager settings + properties: + evictionPadding: + description: Additional size beyond 'minFreeSize' before eviction + kicks in + type: integer + evictionPolicy: + description: Eviction policy to use + type: string + hotlistBloomFilterRecencyHours: + description: Time period relative to the bucket's age, during + which the bloom filter file is protected from cache eviction + type: integer + hotlistRecencySecs: + description: Time period relative to the bucket's age, during + which the bucket is protected from cache eviction + type: integer + maxCacheSize: + description: Max cache size per partition + type: integer + maxConcurrentDownloads: + description: Maximum number of buckets that can be downloaded + from remote storage in parallel + type: integer + maxConcurrentUploads: + description: Maximum number of buckets that can be uploaded + to remote storage in parallel + type: integer + type: object + defaults: + description: Default configuration for indexes + properties: + maxGlobalDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of space for warm and cold buckets of an index + type: integer + maxGlobalRawDataSizeMB: + description: MaxGlobalRawDataSizeMB defines the maximum amount + of cumulative space for warm and cold buckets of an index + type: integer + volumeName: + description: Remote Volume name + type: string + type: object + indexes: + description: List of Splunk indexes + items: + description: IndexSpec defines Splunk index name and storage + path + properties: + hotlistBloomFilterRecencyHours: + description: Time period relative to the bucket's age, during + which the bloom filter file is protected from cache eviction + type: integer + hotlistRecencySecs: + description: Time period relative to
the bucket's age, during + which the bucket is protected from cache eviction + type: integer + maxGlobalDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of space for warm and cold buckets of an index + type: integer + maxGlobalRawDataSizeMB: + description: MaxGlobalRawDataSizeMB defines the maximum amount + of cumulative space for warm and cold buckets of an index + type: integer + name: + description: Splunk index name + type: string + remotePath: + description: Index location relative to the remote volume + path + type: string + volumeName: + description: Remote Volume name + type: string + type: object + type: array + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object + tolerations: + description: Pod's tolerations for Kubernetes node's taint + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + varVolumeStorageConfig: + description: Storage configuration for /opt/splunk/var volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default="10Gi" for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + volumes: + description: List of one or more Kubernetes volumes. These will be + mounted in all pod containers as /mnt/<name> + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod.
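+      # A minimal sketch, assuming a ConfigMap named splunk-defaults exists:
+      # a volume named "defaults" declared here would be mounted in every pod
+      # container at /mnt/defaults, per the description above.
+      #
+      #   volumes:
+      #     - name: defaults
+      #       configMap:
+      #         name: splunk-defaults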
+ properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI of the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts.
+ type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. 
This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". 
Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface <target portal>:<volume name> will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume.
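+          # Hedged sketch (the claim name is an assumed example): referencing
+          # an existing PersistentVolumeClaim from the pod's namespace.
+          #
+          #   volumes:
+          #     - name: licenses
+          #       persistentVolumeClaim:
+          #         claimName: splunk-license-pvc
+          #         readOnly: true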
More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. 
apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours. Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to. Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend. Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to. Defaults to serviceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring.
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
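+          # Hedged sketch (the secret name is an assumed example): projecting a
+          # Secret as a read-only volume with restrictive mode bits, using the
+          # 'secret' fields documented above.
+          #
+          #   volumes:
+          #     - name: mycerts
+          #       secret:
+          #         secretName: splunk-certs
+          #         defaultMode: 0400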
+ type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: ClusterMasterStatus defines the observed state of ClusterMaster + properties: + appContext: + description: App Framework status + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represents the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. This is introduced here so that we don't do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object + bundlePushInfo: + description: Bundle push status tracker + properties: + lastCheckInterval: + format: int64 + type: integer + needToPushMasterApps: + type: boolean + type: object + phase: + description: current phase of the cluster master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource Revision tracker + type: object + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + smartstore: + description: Splunk Smartstore configuration.
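+      # Hedged sketch of the corresponding spec-side configuration that this
+      # status block mirrors (bucket, endpoint, and secret names are assumed
+      # examples, following the SmartStore fields documented below):
+      #
+      #   smartstore:
+      #     defaults:
+      #       volumeName: remote-store
+      #     indexes:
+      #       - name: main
+      #         remotePath: $_index_name
+      #         volumeName: remote-store
+      #     volumes:
+      #       - name: remote-store
+      #         endpoint: https://s3-us-west-2.amazonaws.com
+      #         path: my-bucket/indexes
+      #         secretRef: s3-secret
+      #         provider: aws
+      #         storageType: s3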
Refer to indexes.conf.spec + and server.conf.spec on docs.splunk.com + properties: + cacheManager: + description: Defines Cache manager settings + properties: + evictionPadding: + description: Additional size beyond 'minFreeSize' before eviction + kicks in + type: integer + evictionPolicy: + description: Eviction policy to use + type: string + hotlistBloomFilterRecencyHours: + description: Time period relative to the bucket's age, during + which the bloom filter file is protected from cache eviction + type: integer + hotlistRecencySecs: + description: Time period relative to the bucket's age, during + which the bucket is protected from cache eviction + type: integer + maxCacheSize: + description: Max cache size per partition + type: integer + maxConcurrentDownloads: + description: Maximum number of buckets that can be downloaded + from remote storage in parallel + type: integer + maxConcurrentUploads: + description: Maximum number of buckets that can be uploaded + to remote storage in parallel + type: integer + type: object + defaults: + description: Default configuration for indexes + properties: + maxGlobalDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of space for warm and cold buckets of an index + type: integer + maxGlobalRawDataSizeMB: + description: MaxGlobalRawDataSizeMB defines the maximum amount + of cumulative space for warm and cold buckets of an index + type: integer + volumeName: + description: Remote Volume name + type: string + type: object + indexes: + description: List of Splunk indexes + items: + description: IndexSpec defines Splunk index name and storage + path + properties: + hotlistBloomFilterRecencyHours: + description: Time period relative to the bucket's age, during + which the bloom filter file is protected from cache eviction + type: integer + hotlistRecencySecs: + description: Time period relative to the bucket's age, during + which the bucket is protected from cache eviction + type: integer + maxGlobalDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of space for warm and cold buckets of an index + type: integer + maxGlobalRawDataSizeMB: + description: MaxGlobalRawDataSizeMB defines the maximum amount + of cumulative space for warm and cold buckets of an index + type: integer + name: + description: Splunk index name + type: string + remotePath: + description: Index location relative to the remote volume + path + type: string + volumeName: + description: Remote Volume name + type: string + type: object + type: array + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type.
Supported values: s3' + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1beta1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha3 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha2 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string diff --git a/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_indexerclusters_crd.yaml b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_indexerclusters_crd.yaml new file mode 100644 index 000000000..d384c7c82 --- /dev/null +++ b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_indexerclusters_crd.yaml @@ -0,0 +1,2607 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: indexerclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: IndexerCluster + listKind: IndexerClusterList + plural: indexerclusters + shortNames: + - idc + - idxc + singular: indexercluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of indexer cluster + jsonPath: .status.phase + name: Phase + type: string + - description: Status of cluster master + jsonPath: .status.clusterMasterPhase + name: Master + type: string + - description: Desired number of indexer peers + jsonPath: .status.replicas + name: Desired + type: integer + - description: Current number of ready indexer peers + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Age of indexer cluster + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: IndexerCluster is the Schema for a Splunk Enterprise indexer + cluster + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IndexerClusterSpec defines the desired state of a Splunk + Enterprise indexer cluster + properties: + Mock: + description: Mock to differentiate between UTs and actual reconcile + type: boolean + affinity: + description: Kubernetes Affinity rules that control how pods are assigned + to particular nodes. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. 
The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. 
The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. 
The requirements are
                      + ANDed.
                      + items:
                      + description: A label selector requirement
                      is a selector that contains values, a key,
                      + and an operator that relates the key and
                      + values.
                      + properties:
                      + key:
                      + description: key is the label key that
                      the selector applies to.
                      + type: string
                      + operator:
                      + description: operator represents a key's
                      relationship to a set of values. Valid
                      + operators are In, NotIn, Exists and
                      + DoesNotExist.
                      + type: string
                      + values:
                      + description: values is an array of string
                      + values. If the operator is In or NotIn,
                      + the values array must be non-empty.
                      + If the operator is Exists or DoesNotExist,
                      + the values array must be empty. This
                      + array is replaced during a strategic
                      + merge patch.
                      + items:
                      + type: string
                      + type: array
                      + required:
                      + - key
                      + - operator
                      + type: object
                      + type: array
                      + matchLabels:
                      + additionalProperties:
                      + type: string
                      + description: matchLabels is a map of {key,value}
                      + pairs. A single {key,value} in the matchLabels
                      + map is equivalent to an element of matchExpressions,
                      + whose key field is "key", the operator is
                      + "In", and the values array contains only "value".
                      + The requirements are ANDed.
                      + type: object
                      + type: object
                      + namespaces:
                      + description: namespaces specifies which namespaces
                      + the labelSelector applies to (matches against);
                      + null or empty list means "this pod's namespace"
                      + items:
                      + type: string
                      + type: array
                      + topologyKey:
                      + description: This pod should be co-located (affinity)
                      + or not co-located (anti-affinity) with the pods
                      + matching the labelSelector in the specified namespaces,
                      + where co-located is defined as running on a node
                      + whose value of the label with key topologyKey
                      + matches that of any node on which any of the selected
                      + pods is running. Empty topologyKey is not allowed.
                      + type: string
                      + required:
                      + - topologyKey
                      + type: object
                      + weight:
                      + description: weight associated with matching the corresponding
                      + podAffinityTerm, in the range 1-100.
                      + format: int32
                      + type: integer
                      + required:
                      + - podAffinityTerm
                      + - weight
                      + type: object
                      + type: array
                      + requiredDuringSchedulingIgnoredDuringExecution:
                      + description: If the affinity requirements specified by this
                      + field are not met at scheduling time, the pod will not be
                      + scheduled onto the node. If the affinity requirements specified
                      + by this field cease to be met at some point during pod execution
                      + (e.g. due to a pod label update), the system may or may
                      + not try to eventually evict the pod from its node. When
                      + there are multiple elements, the lists of nodes corresponding
                      + to each podAffinityTerm are intersected, i.e. all terms
                      + must be satisfied.
                      + items:
                      + description: Defines a set of pods (namely those matching
                      + the labelSelector relative to the given namespace(s))
                      + that this pod should be co-located (affinity) or not co-located
                      + (anti-affinity) with, where co-located is defined as running
                      + on a node whose value of the label with key <topologyKey>
                      + matches that of any node on which a pod of the set of
                      + pods is running
                      + properties:
                      + labelSelector:
                      + description: A label query over a set of resources,
                      + in this case pods.
                      + properties:
                      + matchExpressions:
                      + description: matchExpressions is a list of label
                      + selector requirements. The requirements are ANDed.
                      + items:
                      + description: A label selector requirement is a
                      + selector that contains values, a key, and an
                      + operator that relates the key and values.
                      + properties:
                      + key:
                      + description: key is the label key that the
                      + selector applies to.
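+                      # Illustrative only: a hypothetical pod-affinity rule of
+                      # this shape, placed in a CR's spec, would co-locate pods
+                      # carrying the (assumed) label app=splunk on one node:
+                      #   affinity:
+                      #     podAffinity:
+                      #       requiredDuringSchedulingIgnoredDuringExecution:
+                      #       - labelSelector:
+                      #           matchExpressions:
+                      #           - key: app
+                      #             operator: In
+                      #             values: ["splunk"]
+                      #         topologyKey: kubernetes.io/hostname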
+ type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist,
                      + the values array must be empty. This
                      + array is replaced during a strategic
                      + merge patch.
                      + items:
                      + type: string
                      + type: array
                      + required:
                      + - key
                      + - operator
                      + type: object
                      + type: array
                      + matchLabels:
                      + additionalProperties:
                      + type: string
                      + description: matchLabels is a map of {key,value}
                      + pairs. A single {key,value} in the matchLabels
                      + map is equivalent to an element of matchExpressions,
                      + whose key field is "key", the operator is
                      + "In", and the values array contains only "value".
                      + The requirements are ANDed.
                      + type: object
                      + type: object
                      + namespaces:
                      + description: namespaces specifies which namespaces
                      + the labelSelector applies to (matches against);
                      + null or empty list means "this pod's namespace"
                      + items:
                      + type: string
                      + type: array
                      + topologyKey:
                      + description: This pod should be co-located (affinity)
                      + or not co-located (anti-affinity) with the pods
                      + matching the labelSelector in the specified namespaces,
                      + where co-located is defined as running on a node
                      + whose value of the label with key topologyKey
                      + matches that of any node on which any of the selected
                      + pods is running. Empty topologyKey is not allowed.
                      + type: string
                      + required:
                      + - topologyKey
                      + type: object
                      + weight:
                      + description: weight associated with matching the corresponding
                      + podAffinityTerm, in the range 1-100.
                      + format: int32
                      + type: integer
                      + required:
                      + - podAffinityTerm
                      + - weight
                      + type: object
                      + type: array
                      + requiredDuringSchedulingIgnoredDuringExecution:
                      + description: If the anti-affinity requirements specified by
                      + this field are not met at scheduling time, the pod will
                      + not be scheduled onto the node. If the anti-affinity requirements
                      + specified by this field cease to be met at some point during
                      + pod execution (e.g. due to a pod label update), the system
                      + may or may not try to eventually evict the pod from its
                      + node. When there are multiple elements, the lists of nodes
                      + corresponding to each podAffinityTerm are intersected, i.e.
                      + all terms must be satisfied.
                      + items:
                      + description: Defines a set of pods (namely those matching
                      + the labelSelector relative to the given namespace(s))
                      + that this pod should be co-located (affinity) or not co-located
                      + (anti-affinity) with, where co-located is defined as running
                      + on a node whose value of the label with key <topologyKey>
                      + matches that of any node on which a pod of the set of
                      + pods is running
                      + properties:
                      + labelSelector:
                      + description: A label query over a set of resources,
                      + in this case pods.
                      + properties:
                      + matchExpressions:
                      + description: matchExpressions is a list of label
                      + selector requirements. The requirements are ANDed.
                      + items:
                      + description: A label selector requirement is a
                      + selector that contains values, a key, and an
                      + operator that relates the key and values.
                      + properties:
                      + key:
                      + description: key is the label key that the
                      + selector applies to.
                      + type: string
                      + operator:
                      + description: operator represents a key's relationship
                      + to a set of values. Valid operators are
                      + In, NotIn, Exists and DoesNotExist.
                      + type: string
                      + values:
                      + description: values is an array of string
                      + values. If the operator is In or NotIn,
                      + the values array must be non-empty. If the
                      + operator is Exists or DoesNotExist, the
                      + values array must be empty. This array is
                      + replaced during a strategic merge patch.
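+                      # Illustrative only: a hypothetical anti-affinity rule of
+                      # this shape would spread pods with the (assumed) label
+                      # app=splunk-indexer across distinct nodes:
+                      #   affinity:
+                      #     podAntiAffinity:
+                      #       requiredDuringSchedulingIgnoredDuringExecution:
+                      #       - labelSelector:
+                      #           matchLabels:
+                      #             app: splunk-indexer
+                      #         topologyKey: kubernetes.io/hostname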
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + clusterMasterRef: + description: ClusterMasterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + defaults: + description: Inline map of default.yml overrides used to initialize + the environment + type: string + defaultsUrl: + description: Full path or URL for one or more default.yml files, separated + by commas + type: string + defaultsUrlApps: + description: Full path or URL for one or more defaults.yml files specific + to App install, separated by commas. The defaults listed here will + be installed on the CM, standalone, search head deployer or license + master instance. 
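+            # Illustrative only: a minimal IndexerCluster CR exercising the
+            # reference fields above might look like the sketch below; the
+            # object names are hypothetical.
+            #   apiVersion: enterprise.splunk.com/v2
+            #   kind: IndexerCluster
+            #   metadata:
+            #     name: example
+            #   spec:
+            #     replicas: 3
+            #     clusterMasterRef:
+            #       name: example-cm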
+ type: string + etcVolumeStorageConfig: + description: Storage configuration for /opt/splunk/etc volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + extraEnv: + description: 'ExtraEnv refers to extra environment variables to be + passed to the Splunk instance containers WARNING: Setting environment + variables used by Splunk or Ansible will affect Splunk installation + and operation' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
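+            # Illustrative only: a hypothetical extraEnv entry that sources its
+            # value from a Secret key (names are placeholders; note the WARNING
+            # above about variables consumed by Splunk or Ansible):
+            #   extraEnv:
+            #     - name: EXAMPLE_SETTING
+            #       valueFrom:
+            #         secretKeyRef:
+            #           name: example-secret
+            #           key: password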
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE + environment variables) + type: string + imagePullPolicy: + description: 'Sets pull policy for all images (either “Always” or + the default: “IfNotPresent”)' + enum: + - Always + - IfNotPresent + type: string + licenseMasterRef: + description: LicenseMasterRef refers to a Splunk Enterprise license + master managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
              + type: string
              + type: object
              + licenseUrl:
              + description: Full path or URL for a Splunk Enterprise license file
              + type: string
              + livenessInitialDelaySeconds:
              + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See
              + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command)
              + for the Liveness probe Note: If needed, Operator overrides with
              + a higher value'
              + format: int32
              + type: integer
              + readinessInitialDelaySeconds:
              + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See
              + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes)
              + for Readiness probe Note: If needed, Operator overrides with a higher
              + value'
              + format: int32
              + type: integer
              + replicas:
              + description: Number of indexer peer pods; an indexer cluster will
              + be created if > 1
              + format: int32
              + type: integer
              + resources:
              + description: resource requirements for the pod containers
              + properties:
              + limits:
              + additionalProperties:
              + anyOf:
              + - type: integer
              + - type: string
              + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
              + x-kubernetes-int-or-string: true
              + description: 'Limits describes the maximum amount of compute resources
              + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
              + type: object
              + requests:
              + additionalProperties:
              + anyOf:
              + - type: integer
              + - type: string
              + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
              + x-kubernetes-int-or-string: true
              + description: 'Requests describes the minimum amount of compute
              + resources required. If Requests is omitted for a container,
              + it defaults to Limits if that is explicitly specified, otherwise
              + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
              + type: object
              + type: object
              + schedulerName:
              + description: Name of Scheduler to use for pod placement (defaults
              + to “default-scheduler”)
              + type: string
              + serviceAccount:
              + description: ServiceAccount is the service account used by the pods
              + deployed by the CRD. If not specified, uses the default serviceAccount
              + for the namespace as per https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
              + type: string
              + serviceTemplate:
              + description: ServiceTemplate is a template used to create Kubernetes
              + services
              + properties:
              + apiVersion:
              + description: 'APIVersion defines the versioned schema of this
              + representation of an object. Servers should convert recognized
              + schemas to the latest internal value, and may reject unrecognized
              + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
              + type: string
              + kind:
              + description: 'Kind is a string value representing the REST resource
              + this object represents. Servers may infer this from the endpoint
              + the client submits requests to. Cannot be updated. In CamelCase.
              + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
              + type: string
              + metadata:
              + description: 'Standard object''s metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + clusterIP: + description: 'clusterIP is the IP address of the service and + is usually assigned randomly by the master. If an address + is specified manually and is not in use by others, it will + be allocated to the service; otherwise, creation of the + service will fail. This field can not be changed through + updates. Valid values are "None", empty string (""), or + a valid IP address. "None" can be specified for headless + services when proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. Ignored if + type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + externalIPs: + description: externalIPs is a list of IP addresses for which + nodes in the cluster will also accept traffic for this service. These + IPs are not managed by Kubernetes. The user is responsible + for ensuring that traffic arrives at a node with this IP. A + common example is external load-balancers that are not part + of the Kubernetes system. + items: + type: string + type: array + externalName: + description: externalName is the external reference that kubedns + or equivalent will return as a CNAME record for this service. + No proxying will be involved. Must be a valid RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires Type + to be ExternalName. + type: string + externalTrafficPolicy: + description: externalTrafficPolicy denotes if this Service + desires to route external traffic to node-local or cluster-wide + endpoints. "Local" preserves the client source IP and avoids + a second hop for LoadBalancer and Nodeport type services, + but risks potentially imbalanced traffic spreading. "Cluster" + obscures the client source IP and may cause a second hop + to another node, but should have good overall load-spreading. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. If not specified, HealthCheckNodePort + is created by the service api backend with the allocated + nodePort. Will use user-specified nodePort value if specified + by the client. Only effects when Type is set to LoadBalancer + and ExternalTrafficPolicy is set to Local. + format: int32 + type: integer + ipFamily: + description: ipFamily specifies whether this Service has a + preference for a particular IP family (e.g. IPv4 vs. IPv6). If + a specific IP family is requested, the clusterIP field will + be allocated from that family, if it is available in the + cluster. If no IP family is requested, the cluster's primary + IP family will be used. Other IP fields (loadBalancerIP, + loadBalancerSourceRanges, externalIPs) and controllers which + allocate external load-balancers should use the same IP + family. Endpoints for this Service will be of this family. This + field is immutable after creation. Assigning a ServiceIPFamily + not available in the cluster (e.g. IPv6 in IPv4 only cluster) + is an error condition and will fail during clusterIP assignment. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer LoadBalancer + will get created with the IP specified in this field. 
This + feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider load-balancer + will be restricted to the specified client IPs. This field + will be ignored if the cloud-provider does not support the + feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this port. + This field follows standard Kubernetes label syntax. + Un-prefixed names are reserved for IANA standard service + names (as per RFC-6335 and http://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names such + as mycompany.com/my-custom-protocol. Field can be + enabled with ServiceAppProtocol feature gate. + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field in + the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this service + is exposed when type=NodePort or LoadBalancer. Usually + assigned by the system. If specified, it will be allocated + to the service if unused or else creation of the service + will fail. Default is to auto-allocate a port if the + ServiceType of this Service requires one. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access on + the pods targeted by the service. Number must be in + the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named + port in the target Pod''s container ports. If this + is not specified, the value of the ''port'' field + is used (an identity map). This field is ignored for + services with clusterIP=None, and should be omitted + or set equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses, when set to true, indicates + that DNS implementations must publish the notReadyAddresses + of subsets for the Endpoints associated with the Service. + The default value is false. 
The primary use case for setting + this field is to use a StatefulSet's Headless Service to + propagate SRV records for its Pods without respect to their + readiness for purpose of peer discovery. + type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label keys + and values matching this selector. If empty or not present, + the service is assumed to have an external process managing + its endpoints, which Kubernetes will not modify. Only applies + to types ClusterIP, NodePort, and LoadBalancer. Ignored + if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + topologyKeys: + description: topologyKeys is a preference-order list of topology + keys which implementations of services should use to preferentially + sort endpoints when accessing this Service, it can not be + used at the same time as externalTrafficPolicy=Local. Topology + keys must be valid label keys and at most 16 keys may be + specified. Endpoints are chosen based on the first topology + key with available backends. If this field is specified + and all entries have no backends that match the topology + of the client, the service has no backends for that client + and connections should fail. The special value "*" may be + used to mean "any topology". This catch-all value, if used, + only makes sense as the last value in the list. If this + is not specified or empty, no topology constraints will + be applied. + items: + type: string + type: array + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, + NodePort, and LoadBalancer. "ExternalName" maps to the specified + externalName. "ClusterIP" allocates a cluster-internal IP + address for load-balancing to endpoints. Endpoints are determined + by the selector or if that is not specified, by manual construction + of an Endpoints object. If clusterIP is "None", no virtual + IP is allocated and the endpoints are published as a set + of endpoints rather than a stable IP. "NodePort" builds + on ClusterIP and allocates a port on every node which routes + to the clusterIP. "LoadBalancer" builds on NodePort and + creates an external load-balancer (if supported in the current + cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + status: + description: 'Most recently observed status of the service. Populated + by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
              + properties:
              + loadBalancer:
              + description: LoadBalancer contains the current status of the
              + load-balancer, if one is present.
              + properties:
              + ingress:
              + description: Ingress is a list containing ingress points
              + for the load-balancer. Traffic intended for the service
              + should be sent to these ingress points.
              + items:
              + description: 'LoadBalancerIngress represents the status
              + of a load-balancer ingress point: traffic intended
              + for the service should be sent to an ingress point.'
              + properties:
              + hostname:
              + description: Hostname is set for load-balancer ingress
              + points that are DNS based (typically AWS load-balancers)
              + type: string
              + ip:
              + description: IP is set for load-balancer ingress
              + points that are IP based (typically GCE or OpenStack
              + load-balancers)
              + type: string
              + type: object
              + type: array
              + type: object
              + type: object
              + type: object
              + tolerations:
              + description: Pod's tolerations for Kubernetes node taints
              + items:
              + description: The pod this Toleration is attached to tolerates any
              + taint that matches the triple <key,value,effect> using the matching
              + operator <operator>.
              + properties:
              + effect:
              + description: Effect indicates the taint effect to match. Empty
              + means match all taint effects. When specified, allowed values
              + are NoSchedule, PreferNoSchedule and NoExecute.
              + type: string
              + key:
              + description: Key is the taint key that the toleration applies
              + to. Empty means match all taint keys. If the key is empty,
              + operator must be Exists; this combination means to match all
              + values and all keys.
              + type: string
              + operator:
              + description: Operator represents a key's relationship to the
              + value. Valid operators are Exists and Equal. Defaults to Equal.
              + Exists is equivalent to wildcard for value, so that a pod
              + can tolerate all taints of a particular category.
              + type: string
              + tolerationSeconds:
              + description: TolerationSeconds represents the period of time
              + the toleration (which must be of effect NoExecute, otherwise
              + this field is ignored) tolerates the taint. By default, it
              + is not set, which means tolerate the taint forever (do not
              + evict). Zero and negative values will be treated as 0 (evict
              + immediately) by the system.
              + format: int64
              + type: integer
              + value:
              + description: Value is the taint value the toleration matches
              + to. If the operator is Exists, the value should be empty,
              + otherwise just a regular string.
              + type: string
              + type: object
              + type: array
              + varVolumeStorageConfig:
              + description: Storage configuration for /opt/splunk/var volume
              + properties:
              + ephemeralStorage:
              + description: If true, ephemeral (emptyDir) storage will be used
              + type: boolean
              + storageCapacity:
              + description: Storage capacity to request persistent volume claims
              + (default=”10Gi” for etc and "100Gi" for var)
              + type: string
              + storageClassName:
              + description: Name of StorageClass to use for persistent volume
              + claims
              + type: string
              + type: object
              + volumes:
              + description: List of one or more Kubernetes volumes. These will be
              + mounted in all pod containers as /mnt/<name>
              + items:
              + description: Volume represents a named volume in a pod that may
              + be accessed by any container in the pod.
              + properties:
              + awsElasticBlockStore:
              + description: 'AWSElasticBlockStore represents an AWS Disk resource
              + that is attached to a kubelet''s host machine and then exposed
              + to the pod.
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. 
This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. 
If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' 
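# The emptyDir description above spells out how `medium` and `sizeLimit`
# interact; a minimal sketch with illustrative values (not part of this
# change) showing a memory-backed scratch volume:
#
#   volumes:
#     - name: scratch            # hypothetical volume name
#       emptyDir:
#         medium: Memory         # backed by node RAM instead of disk
#         sizeLimit: 256Mi       # usage capped by min(sizeLimit, pod memory limits)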
+ items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. 
To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. 
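# The gitRepo deprecation note above describes its replacement pattern; a
# hedged sketch of that pattern (the image, repository URL, and names are
# hypothetical, not taken from this change):
#
#   initContainers:
#     - name: clone-repo
#       image: alpine/git
#       args: ["clone", "--", "https://example.com/repo.git", "/repo"]
#       volumeMounts:
#         - name: repo
#           mountPath: /repo
#   volumes:
#     - name: repo
#       emptyDir: {}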
+ type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". 
Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. 
The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours. Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to. Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend. Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to. Defaults to serviceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin.
More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. 
May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: IndexerClusterStatus defines the observed state of a Splunk + Enterprise indexer cluster + properties: + IdxcPasswordChangedSecrets: + additionalProperties: + type: boolean + description: Holds secrets whose IDXC password has changed + type: object + clusterMasterPhase: + description: current phase of the cluster master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + indexer_secret_changed_flag: + description: Indicates when the idxc_secret has been changed for a + peer + items: + type: boolean + type: array + indexing_ready_flag: + description: Indicates if the cluster is ready for indexing. 
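# Rendered on a live IndexerCluster, the status fields defined in this
# block come back roughly as below; all values are illustrative only:
#
#   status:
#     clusterMasterPhase: Ready
#     indexer_secret_changed_flag: [false, false, false]
#     indexing_ready_flag: true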
+ type: boolean + initialized_flag: + description: Indicates if the cluster is initialized. + type: boolean + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. + type: boolean + namespace_scoped_secret_resource_version: + description: Indicates resource version of namespace scoped secret + type: string + peers: + description: status of each indexer cluster peer + items: + description: IndexerClusterMemberStatus is used to track the status + of each indexer cluster peer. + properties: + active_bundle_id: + description: The ID of the configuration bundle currently being + used by the master. + type: string + bucket_count: + description: Count of the number of buckets on this peer, across + all indexes. + format: int64 + type: integer + guid: + description: Unique identifier or GUID for the peer + type: string + is_searchable: + description: Flag indicating if this peer belongs to the current + committed generation and is searchable. + type: boolean + name: + description: Name of the indexer cluster peer + type: string + status: + description: Status of the indexer cluster peer + type: string + type: object + type: array + phase: + description: current phase of the indexer cluster + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready indexer peers + format: int32 + type: integer + replicas: + description: desired number of indexer peers + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + service_ready_flag: + description: Indicates whether the master is ready to begin servicing, + based on whether it is initialized. + type: boolean + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1beta1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha3 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha2 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string diff --git a/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_licensemasters_crd.yaml b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_licensemasters_crd.yaml new file mode 100644 index 000000000..2bb45dcbc --- /dev/null +++ b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_licensemasters_crd.yaml @@ -0,0 +1,2719 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: licensemasters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: LicenseMaster + listKind: LicenseMasterList + plural: licensemasters + shortNames: + - lm + singular: licensemaster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of license master + jsonPath: .status.phase + name: Phase + type: string + - description: Age of license master + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: LicenseMaster is the Schema for a Splunk Enterprise license master. 
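# A minimal LicenseMaster at the new v2 API, sketched for illustration;
# the name is a placeholder and the storage value simply echoes the
# documented default:
#
#   apiVersion: enterprise.splunk.com/v2
#   kind: LicenseMaster
#   metadata:
#     name: example
#   spec:
#     etcVolumeStorageConfig:
#       storageCapacity: 10Gi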
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: LicenseMasterSpec defines the desired state of a Splunk Enterprise + license master. + properties: + Mock: + description: Mock to differentiate between UTs and actual reconcile + type: boolean + affinity: + description: Kubernetes Affinity rules that control how pods are assigned + to particular nodes. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
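# A short nodeAffinity sketch matching the schema above; the label key and
# value are hypothetical examples, not requirements of this operator:
#
#   affinity:
#     nodeAffinity:
#       requiredDuringSchedulingIgnoredDuringExecution:
#         nodeSelectorTerms:
#           - matchExpressions:
#               - key: kubernetes.io/arch
#                 operator: In
#                 values: ["amd64"]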
+ type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). 
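# As the podAntiAffinity description notes, this is how replicas are kept
# apart; a hedged sketch of what a user might set on spec.affinity (the
# pod label is hypothetical):
#
#   affinity:
#     podAntiAffinity:
#       preferredDuringSchedulingIgnoredDuringExecution:
#         - weight: 100
#           podAffinityTerm:
#             topologyKey: kubernetes.io/hostname
#             labelSelector:
#               matchLabels:
#                 app.kubernetes.io/instance: splunk-example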
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. 
If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + appRepo: + description: Splunk enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. 
+ Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object + clusterMasterRef: + description: ClusterMasterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + defaults: + description: Inline map of default.yml overrides used to initialize + the environment + type: string + defaultsUrl: + description: Full path or URL for one or more default.yml files, separated + by commas + type: string + defaultsUrlApps: + description: Full path or URL for one or more defaults.yml files specific + to App install, separated by commas. The defaults listed here will + be installed on the CM, standalone, search head deployer or license + master instance. + type: string + etcVolumeStorageConfig: + description: Storage configuration for /opt/splunk/etc volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + extraEnv: + description: 'ExtraEnv refers to extra environment variables to be + passed to the Splunk instance containers WARNING: Setting environment + variables used by Splunk or Ansible will affect Splunk installation + and operation' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
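+          # Illustrative sketch only (not part of the generated schema; every
+          # name, bucket, and endpoint below is a placeholder): the appRepo
+          # block documented above might be populated like this to poll an S3
+          # bucket for cluster-scoped apps every 15 minutes:
+          #
+          #   appRepo:
+          #     appsRepoPollIntervalSeconds: 900
+          #     defaults:
+          #       volumeName: appVolume
+          #       scope: cluster
+          #     appSources:
+          #       - name: networkApps
+          #         location: networkApps/
+          #     volumes:
+          #       - name: appVolume
+          #         storageType: s3
+          #         provider: aws
+          #         endpoint: https://s3-us-west-2.amazonaws.com
+          #         path: app-bucket/apps/
+          #         secretRef: s3-secret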
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE + environment variables) + type: string + imagePullPolicy: + description: 'Sets pull policy for all images (either “Always” or + the default: “IfNotPresent”)' + enum: + - Always + - IfNotPresent + type: string + licenseMasterRef: + description: LicenseMasterRef refers to a Splunk Enterprise license + master managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + licenseUrl: + description: Full path or URL for a Splunk Enterprise license file + type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer + resources: + description: resource requirements for the pod containers + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + schedulerName: + description: Name of Scheduler to use for pod placement (defaults + to “default-scheduler”) + type: string + serviceAccount: + description: ServiceAccount is the service account used by the pods + deployed by the CRD. If not specified uses the default serviceAccount + for the namespace as per https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + type: string + serviceTemplate: + description: ServiceTemplate is a template used to create Kubernetes + services + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: Spec defines the behavior of a service. 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + clusterIP: + description: 'clusterIP is the IP address of the service and + is usually assigned randomly by the master. If an address + is specified manually and is not in use by others, it will + be allocated to the service; otherwise, creation of the + service will fail. This field can not be changed through + updates. Valid values are "None", empty string (""), or + a valid IP address. "None" can be specified for headless + services when proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. Ignored if + type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + externalIPs: + description: externalIPs is a list of IP addresses for which + nodes in the cluster will also accept traffic for this service. These + IPs are not managed by Kubernetes. The user is responsible + for ensuring that traffic arrives at a node with this IP. A + common example is external load-balancers that are not part + of the Kubernetes system. + items: + type: string + type: array + externalName: + description: externalName is the external reference that kubedns + or equivalent will return as a CNAME record for this service. + No proxying will be involved. Must be a valid RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires Type + to be ExternalName. + type: string + externalTrafficPolicy: + description: externalTrafficPolicy denotes if this Service + desires to route external traffic to node-local or cluster-wide + endpoints. "Local" preserves the client source IP and avoids + a second hop for LoadBalancer and Nodeport type services, + but risks potentially imbalanced traffic spreading. "Cluster" + obscures the client source IP and may cause a second hop + to another node, but should have good overall load-spreading. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. If not specified, HealthCheckNodePort + is created by the service api backend with the allocated + nodePort. Will use user-specified nodePort value if specified + by the client. Only effects when Type is set to LoadBalancer + and ExternalTrafficPolicy is set to Local. + format: int32 + type: integer + ipFamily: + description: ipFamily specifies whether this Service has a + preference for a particular IP family (e.g. IPv4 vs. IPv6). If + a specific IP family is requested, the clusterIP field will + be allocated from that family, if it is available in the + cluster. If no IP family is requested, the cluster's primary + IP family will be used. Other IP fields (loadBalancerIP, + loadBalancerSourceRanges, externalIPs) and controllers which + allocate external load-balancers should use the same IP + family. Endpoints for this Service will be of this family. This + field is immutable after creation. Assigning a ServiceIPFamily + not available in the cluster (e.g. IPv6 in IPv4 only cluster) + is an error condition and will fail during clusterIP assignment. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer LoadBalancer + will get created with the IP specified in this field. This + feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature.' 
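+          # Hedged example of the LoadBalancer-related fields above (the IP is
+          # a documentation placeholder and assumes cloud-provider support):
+          #
+          #   serviceTemplate:
+          #     spec:
+          #       type: LoadBalancer
+          #       loadBalancerIP: 203.0.113.10
+          #       externalTrafficPolicy: Local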
+ type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + traffic through the cloud-provider load-balancer + will be restricted to the specified client IPs. This field + will be ignored if the cloud-provider does not support the + feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this port. + This field follows standard Kubernetes label syntax. + Un-prefixed names are reserved for IANA standard service + names (as per RFC-6335 and http://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names such + as mycompany.com/my-custom-protocol. Field can be + enabled with ServiceAppProtocol feature gate. + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field in + the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this service + is exposed when type=NodePort or LoadBalancer. Usually + assigned by the system. If specified, it will be allocated + to the service if unused or else creation of the service + will fail. Default is to auto-allocate a port if the + ServiceType of this Service requires one. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access on + the pods targeted by the service. Number must be in + the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named + port in the target Pod''s container ports. If this + is not specified, the value of the ''port'' field + is used (an identity map). This field is ignored for + services with clusterIP=None, and should be omitted + or set equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses, when set to true, indicates + that DNS implementations must publish the notReadyAddresses + of subsets for the Endpoints associated with the Service. + The default value is false. The primary use case for setting + this field is to use a StatefulSet's Headless Service to + propagate SRV records for its Pods without respect to their + readiness for purpose of peer discovery.
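+          # Minimal sketch of a ports entry. Note that port and protocol are
+          # both required and also act as the merge keys declared by
+          # x-kubernetes-list-map-keys above (name and numbers are placeholders):
+          #
+          #   ports:
+          #     - name: splunkweb
+          #       port: 8000
+          #       protocol: TCP
+          #       targetPort: 8000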
+ type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label keys + and values matching this selector. If empty or not present, + the service is assumed to have an external process managing + its endpoints, which Kubernetes will not modify. Only applies + to types ClusterIP, NodePort, and LoadBalancer. Ignored + if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + topologyKeys: + description: topologyKeys is a preference-order list of topology + keys which implementations of services should use to preferentially + sort endpoints when accessing this Service, it can not be + used at the same time as externalTrafficPolicy=Local. Topology + keys must be valid label keys and at most 16 keys may be + specified. Endpoints are chosen based on the first topology + key with available backends. If this field is specified + and all entries have no backends that match the topology + of the client, the service has no backends for that client + and connections should fail. The special value "*" may be + used to mean "any topology". This catch-all value, if used, + only makes sense as the last value in the list. If this + is not specified or empty, no topology constraints will + be applied. + items: + type: string + type: array + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, + NodePort, and LoadBalancer. "ExternalName" maps to the specified + externalName. "ClusterIP" allocates a cluster-internal IP + address for load-balancing to endpoints. Endpoints are determined + by the selector or if that is not specified, by manual construction + of an Endpoints object. If clusterIP is "None", no virtual + IP is allocated and the endpoints are published as a set + of endpoints rather than a stable IP. "NodePort" builds + on ClusterIP and allocates a port on every node which routes + to the clusterIP. "LoadBalancer" builds on NodePort and + creates an external load-balancer (if supported in the current + cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + status: + description: 'Most recently observed status of the service. Populated + by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + loadBalancer: + description: LoadBalancer contains the current status of the + load-balancer, if one is present. 
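+          # Sketch of client-IP session affinity as described above; 10800s is
+          # the documented default, shown explicitly:
+          #
+          #   sessionAffinity: ClientIP
+          #   sessionAffinityConfig:
+          #     clientIP:
+          #       timeoutSeconds: 10800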
+ properties: + ingress: + description: Ingress is a list containing ingress points + for the load-balancer. Traffic intended for the service + should be sent to these ingress points. + items: + description: 'LoadBalancerIngress represents the status + of a load-balancer ingress point: traffic intended + for the service should be sent to an ingress point.' + properties: + hostname: + description: Hostname is set for load-balancer ingress + points that are DNS based (typically AWS load-balancers) + type: string + ip: + description: IP is set for load-balancer ingress + points that are IP based (typically GCE or OpenStack + load-balancers) + type: string + type: object + type: array + type: object + type: object + type: object + tolerations: + description: Pod's tolerations for Kubernetes node's taint + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple <key,value,effect> using the matching + operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + varVolumeStorageConfig: + description: Storage configuration for /opt/splunk/var volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + volumes: + description: List of one or more Kubernetes volumes. These will be + mounted in all pod containers as /mnt/<name> + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. 
+ The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. 
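+          # Hedged sketch of a configMap volume entry; per the volumes field
+          # above it would surface at /mnt/<name> in every pod container. The
+          # volume and ConfigMap names are placeholders:
+          #
+          #   volumes:
+          #     - name: licenses
+          #       configMap:
+          #         name: splunk-licenses
+          #         defaultMode: 0444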
+ type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. 
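+          # Since gitRepo is deprecated (see above), a sketch of the documented
+          # replacement: clone into an emptyDir from an init container. Image,
+          # URL, and names are placeholders:
+          #
+          #   initContainers:
+          #     - name: clone-apps
+          #       image: alpine/git
+          #       args: ["clone", "https://example.com/apps.git", "/work"]
+          #       volumeMounts:
+          #         - name: apps
+          #           mountPath: /work
+          #   volumes:
+          #     - name: apps
+          #       emptyDir: {}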
+ type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface <target portal>:<volume name> will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260).
+ items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
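+          # Minimal sketch of the persistentVolumeClaim source documented above
+          # (the claim name is a placeholder):
+          #
+          #   volumes:
+          #     - name: shared-data
+          #       persistentVolumeClaim:
+          #         claimName: splunk-shared-pvc
+          #         readOnly: true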
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' 
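+          # Hedged sketch of a projected volume combining the configMap source
+          # described above with a key-to-path mapping (names are placeholders):
+          #
+          #   volumes:
+          #     - name: combined-config
+          #       projected:
+          #         sources:
+          #           - configMap:
+          #               name: app-config
+          #               items:
+          #                 - key: inputs.conf
+          #                   path: inputs.conf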
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. 
The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. 
More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. 
May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: LicenseMasterStatus defines the observed state of a Splunk + Enterprise license master. + properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. 
Scope determines whether the App(s) is/are
+ installed locally or cluster-wide'
+ type: string
+ volumeName:
+ description: Remote Storage Volume name
+ type: string
+ type: object
+ type: array
+ appsRepoPollIntervalSeconds:
+ description: Interval in seconds to check the Remote Storage
+ for App changes. The default value for this config is 1
+ hour(3600 sec), minimum value is 1 minute(60sec) and maximum
+ value is 1 day(86400 sec). We assign the value based on
+ following conditions - 1. If no value or 0 is specified
+ then it will be defaulted to 1 hour. 2. If anything less
+ than min is specified then we set it to 1 min. 3. If
+ anything more than the max value is specified then we set
+ it to 1 day.
+ format: int64
+ type: integer
+ defaults:
+ description: Defines the default configuration settings for
+ App sources
+ properties:
+ scope:
+ description: 'Scope of the App deployment: cluster, local.
+ Scope determines whether the App(s) is/are installed
+ locally or cluster-wide'
+ type: string
+ volumeName:
+ description: Remote Storage Volume name
+ type: string
+ type: object
+ volumes:
+ description: List of remote storage volumes
+ items:
+ description: VolumeSpec defines remote volume config
+ properties:
+ endpoint:
+ description: Remote volume URI
+ type: string
+ name:
+ description: Remote volume name
+ type: string
+ path:
+ description: Remote volume path
+ type: string
+ provider:
+ description: 'App Package Remote Store provider. Supported
+ values: aws, minio'
+ type: string
+ secretRef:
+ description: Secret object name
+ type: string
+ storageType:
+ description: 'Remote Storage type. Supported values:
+ s3'
+ type: string
+ type: object
+ type: array
+ type: object
+ appSrcDeployStatus:
+ additionalProperties:
+ description: AppSrcDeployInfo represents deployment info for
+ list of Apps
+ properties:
+ appDeploymentInfo:
+ items:
+ description: AppDeploymentInfo represents information
+ for a single App deployment
+ properties:
+ Size:
+ format: int64
+ type: integer
+ appName:
+ type: string
+ deployStatus:
+ description: AppDeploymentStatus represents the status
+ of an App on the Pod
+ type: integer
+ lastModifiedTime:
+ type: string
+ objectHash:
+ type: string
+ repoState:
+ description: AppRepoState represents the App state
+ on the remote store
+ type: integer
+ type: object
+ type: array
+ type: object
+ description: Represents the Apps deployment status
+ type: object
+ appsRepoStatusPollIntervalSeconds:
+ description: Interval in seconds to check the Remote Storage for
+ App changes. This is introduced here so that we don't do spec
+ validation in every reconcile just because the spec and status
+ are different.
+ format: int64
+ type: integer
+ isDeploymentInProgress:
+ description: IsDeploymentInProgress indicates if the Apps deployment
+ is in progress
+ type: boolean
+ lastAppInfoCheckTime:
+ description: This is set to the time when we get the list of apps
+ from remote storage.
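+ # --- Illustrative note (not generated content): a minimal sketch of the
+ # appRepo spec this schema describes. The volume name, bucket path,
+ # endpoint and secret below are hypothetical placeholders, not values
+ # from this repository.
+ #   appRepo:
+ #     appsRepoPollIntervalSeconds: 900   # clamped to [60, 86400]; 0 defaults to 3600
+ #     defaults:
+ #       volumeName: volume_app_repo
+ #       scope: cluster
+ #     appSources:
+ #       - name: searchApps
+ #         location: searchAppsLoc/
+ #         volumeName: volume_app_repo
+ #     volumes:
+ #       - name: volume_app_repo
+ #         storageType: s3
+ #         provider: aws
+ #         endpoint: https://s3-us-west-2.amazonaws.com
+ #         path: app-bucket/apps/
+ #         secretRef: s3-secret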
+ format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object + phase: + description: current phase of the license master + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1beta1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha3 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha2 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string diff --git a/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_searchheadclusters_crd.yaml b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_searchheadclusters_crd.yaml new file mode 100644 index 000000000..64798ec92 --- /dev/null +++ b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_searchheadclusters_crd.yaml @@ -0,0 +1,2828 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: searchheadclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: SearchHeadCluster + listKind: SearchHeadClusterList + plural: searchheadclusters + shortNames: + - shc + singular: searchheadcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of search head cluster + jsonPath: .status.phase + name: Phase + type: string + - description: Status of the deployer + jsonPath: .status.deployerPhase + name: Deployer + type: string + - description: Desired number of search head cluster members + jsonPath: .status.replicas + name: Desired + type: integer + - description: Current number of ready search head cluster members + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Age of search head cluster + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: SearchHeadCluster is the Schema for a Splunk Enterprise search + head cluster + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SearchHeadClusterSpec defines the desired state of a Splunk + Enterprise search head cluster + properties: + Mock: + description: Mock to differentiate between UTs and actual reconcile + type: boolean + affinity: + description: Kubernetes Affinity rules that control how pods are assigned + to particular nodes. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. 
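+ # --- Illustrative note (not generated content): a hedged sketch of how a
+ # nodeAffinity block under spec.affinity might look in a custom resource;
+ # the label key and value are assumptions for illustration only.
+ #   affinity:
+ #     nodeAffinity:
+ #       requiredDuringSchedulingIgnoredDuringExecution:
+ #         nodeSelectorTerms:
+ #           - matchExpressions:
+ #               - key: kubernetes.io/arch
+ #                 operator: In
+ #                 values: ["amd64"]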
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. 
due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. 
A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to a pod label update), the system may or may + not try to eventually evict the pod from its node. When + there are multiple elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, i.e. all terms + must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. 
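+ # --- Illustrative note (not generated content): a sketch of a
+ # podAntiAffinity rule that spreads pods across nodes, per the
+ # labelSelector and topologyKey fields described here; the label
+ # key and value are hypothetical.
+ #   podAntiAffinity:
+ #     requiredDuringSchedulingIgnoredDuringExecution:
+ #       - labelSelector:
+ #           matchLabels:
+ #             app.kubernetes.io/instance: example-shc
+ #         topologyKey: kubernetes.io/hostname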
+ items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the anti-affinity expressions specified + by this field, but it may choose a node that violates one + or more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node has + pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. 
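+ # --- Illustrative note (not generated content): as the matchLabels
+ # description below states, a matchLabels entry is shorthand for an
+ # In-operator matchExpressions term, e.g.
+ #   matchLabels: {app: splunk}
+ # is equivalent to:
+ #   matchExpressions:
+ #     - key: app
+ #       operator: In
+ #       values: ["splunk"]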
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces + the labelSelector applies to (matches against); + null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods + matching the labelSelector in the specified namespaces, + where co-located is defined as running on a node + whose value of the label with key topologyKey + matches that of any node on which any of the selected + pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding + podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by + this field are not met at scheduling time, the pod will + not be scheduled onto the node. If the anti-affinity requirements + specified by this field cease to be met at some point during + pod execution (e.g. due to a pod label update), the system + may or may not try to eventually evict the pod from its + node. When there are multiple elements, the lists of nodes + corresponding to each podAffinityTerm are intersected, i.e. + all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching + the labelSelector relative to the given namespace(s)) + that this pod should be co-located (affinity) or not co-located + (anti-affinity) with, where co-located is defined as running + on a node whose value of the label with key + matches that of any node on which a pod of the set of + pods is running + properties: + labelSelector: + description: A label query over a set of resources, + in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. 
If the + operator is Exists or DoesNotExist, the + values array must be empty. This array is + replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". The + requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the + labelSelector applies to (matches against); null or + empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) + or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where + co-located is defined as running on a node whose value + of the label with key topologyKey matches that of + any node on which any of the selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + appRepo: + description: Splunk Enterprise App repository. Specifies remote App + location and scope for Splunk App management + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed in + this location. Logical name must be unique to the appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. The default value for this config is 1 hour(3600 + sec), minimum value is 1 minute(60sec) and maximum value is + 1 day(86400 sec). We assign the value based on following conditions + - 1. If no value or 0 is specified then it will be defaulted + to 1 hour. 2. If anything less than min is specified then + we set it to 1 min. 3. If anything more than the max value + is specified then we set it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for App + sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. 
Supported values: s3' + type: string + type: object + type: array + type: object + clusterMasterRef: + description: ClusterMasterRef refers to a Splunk Enterprise indexer + cluster managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + defaults: + description: Inline map of default.yml overrides used to initialize + the environment + type: string + defaultsUrl: + description: Full path or URL for one or more default.yml files, separated + by commas + type: string + defaultsUrlApps: + description: Full path or URL for one or more defaults.yml files specific + to App install, separated by commas. The defaults listed here will + be installed on the CM, standalone, search head deployer or license + master instance. + type: string + etcVolumeStorageConfig: + description: Storage configuration for /opt/splunk/etc volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default=”10Gi” for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + extraEnv: + description: 'ExtraEnv refers to extra environment variables to be + passed to the Splunk instance containers WARNING: Setting environment + variables used by Splunk or Ansible will affect Splunk installation + and operation' + items: + description: EnvVar represents an environment variable present in + a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using + the previous defined environment variables in the container + and any service environment variables. 
If a variable cannot + be resolved, the reference in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped with a double $$, ie: + $$(VAR_NAME). Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot + be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key + must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + image: + description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE + environment variables) + type: string + imagePullPolicy: + description: 'Sets pull policy for all images (either “Always” or + the default: “IfNotPresent”)' + enum: + - Always + - IfNotPresent + type: string + licenseMasterRef: + description: LicenseMasterRef refers to a Splunk Enterprise license + master managed by the operator within Kubernetes + properties: + apiVersion: + description: API version of the referent. 
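+ # --- Illustrative note (not generated content): a hedged extraEnv sketch
+ # combining a literal value with a valueFrom source, per the EnvVar fields
+ # described above; the names are hypothetical, and overriding variables
+ # used by Splunk or Ansible can affect installation.
+ #   extraEnv:
+ #     - name: TZ
+ #       value: UTC
+ #     - name: NODE_NAME
+ #       valueFrom:
+ #         fieldRef:
+ #           fieldPath: spec.nodeName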
+ type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + licenseUrl: + description: Full path or URL for a Splunk Enterprise license file + type: string + livenessInitialDelaySeconds: + description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) + for the Liveness probe Note: If needed, Operator overrides with + a higher value' + format: int32 + type: integer + readinessInitialDelaySeconds: + description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See + https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) + for Readiness probe Note: If needed, Operator overrides with a higher + value' + format: int32 + type: integer + replicas: + description: Number of search head pods; a search head cluster will + be created if > 1 + format: int32 + type: integer + resources: + description: resource requirements for the pod containers + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + schedulerName: + description: Name of Scheduler to use for pod placement (defaults + to “default-scheduler”) + type: string + serviceAccount: + description: ServiceAccount is the service account used by the pods + deployed by the CRD. If not specified uses the default serviceAccount + for the namespace as per https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server + type: string + serviceTemplate: + description: ServiceTemplate is a template used to create Kubernetes + services + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + properties: + clusterIP: + description: 'clusterIP is the IP address of the service and + is usually assigned randomly by the master. If an address + is specified manually and is not in use by others, it will + be allocated to the service; otherwise, creation of the + service will fail. This field can not be changed through + updates. Valid values are "None", empty string (""), or + a valid IP address. "None" can be specified for headless + services when proxying is not required. Only applies to + types ClusterIP, NodePort, and LoadBalancer. Ignored if + type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + externalIPs: + description: externalIPs is a list of IP addresses for which + nodes in the cluster will also accept traffic for this service. These + IPs are not managed by Kubernetes. The user is responsible + for ensuring that traffic arrives at a node with this IP. A + common example is external load-balancers that are not part + of the Kubernetes system. + items: + type: string + type: array + externalName: + description: externalName is the external reference that kubedns + or equivalent will return as a CNAME record for this service. + No proxying will be involved. Must be a valid RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires Type + to be ExternalName. + type: string + externalTrafficPolicy: + description: externalTrafficPolicy denotes if this Service + desires to route external traffic to node-local or cluster-wide + endpoints. 
"Local" preserves the client source IP and avoids + a second hop for LoadBalancer and Nodeport type services, + but risks potentially imbalanced traffic spreading. "Cluster" + obscures the client source IP and may cause a second hop + to another node, but should have good overall load-spreading. + type: string + healthCheckNodePort: + description: healthCheckNodePort specifies the healthcheck + nodePort for the service. If not specified, HealthCheckNodePort + is created by the service api backend with the allocated + nodePort. Will use user-specified nodePort value if specified + by the client. Only effects when Type is set to LoadBalancer + and ExternalTrafficPolicy is set to Local. + format: int32 + type: integer + ipFamily: + description: ipFamily specifies whether this Service has a + preference for a particular IP family (e.g. IPv4 vs. IPv6). If + a specific IP family is requested, the clusterIP field will + be allocated from that family, if it is available in the + cluster. If no IP family is requested, the cluster's primary + IP family will be used. Other IP fields (loadBalancerIP, + loadBalancerSourceRanges, externalIPs) and controllers which + allocate external load-balancers should use the same IP + family. Endpoints for this Service will be of this family. This + field is immutable after creation. Assigning a ServiceIPFamily + not available in the cluster (e.g. IPv6 in IPv4 only cluster) + is an error condition and will fail during clusterIP assignment. + type: string + loadBalancerIP: + description: 'Only applies to Service Type: LoadBalancer LoadBalancer + will get created with the IP specified in this field. This + feature depends on whether the underlying cloud-provider + supports specifying the loadBalancerIP when a load balancer + is created. This field will be ignored if the cloud-provider + does not support the feature.' + type: string + loadBalancerSourceRanges: + description: 'If specified and supported by the platform, + this will restrict traffic through the cloud-provider load-balancer + will be restricted to the specified client IPs. This field + will be ignored if the cloud-provider does not support the + feature." More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/' + items: + type: string + type: array + ports: + description: 'The list of ports that are exposed by this service. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + items: + description: ServicePort contains information on service's + port. + properties: + appProtocol: + description: The application protocol for this port. + This field follows standard Kubernetes label syntax. + Un-prefixed names are reserved for IANA standard service + names (as per RFC-6335 and http://www.iana.org/assignments/service-names). + Non-standard protocols should use prefixed names such + as mycompany.com/my-custom-protocol. Field can be + enabled with ServiceAppProtocol feature gate. + type: string + name: + description: The name of this port within the service. + This must be a DNS_LABEL. All ports within a ServiceSpec + must have unique names. When considering the endpoints + for a Service, this must match the 'name' field in + the EndpointPort. Optional if only one ServicePort + is defined on this service. + type: string + nodePort: + description: 'The port on each node on which this service + is exposed when type=NodePort or LoadBalancer. Usually + assigned by the system. 
If specified, it will be allocated + to the service if unused or else creation of the service + will fail. Default is to auto-allocate a port if the + ServiceType of this Service requires one. More info: + https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' + format: int32 + type: integer + port: + description: The port that will be exposed by this service. + format: int32 + type: integer + protocol: + description: The IP protocol for this port. Supports + "TCP", "UDP", and "SCTP". Default is TCP. + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Number or name of the port to access on + the pods targeted by the service. Number must be in + the range 1 to 65535. Name must be an IANA_SVC_NAME. + If this is a string, it will be looked up as a named + port in the target Pod''s container ports. If this + is not specified, the value of the ''port'' field + is used (an identity map). This field is ignored for + services with clusterIP=None, and should be omitted + or set equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' + x-kubernetes-int-or-string: true + required: + - port + - protocol + type: object + type: array + x-kubernetes-list-map-keys: + - port + - protocol + x-kubernetes-list-type: map + publishNotReadyAddresses: + description: publishNotReadyAddresses, when set to true, indicates + that DNS implementations must publish the notReadyAddresses + of subsets for the Endpoints associated with the Service. + The default value is false. The primary use case for setting + this field is to use a StatefulSet's Headless Service to + propagate SRV records for its Pods without respect to their + readiness for purpose of peer discovery. + type: boolean + selector: + additionalProperties: + type: string + description: 'Route service traffic to pods with label keys + and values matching this selector. If empty or not present, + the service is assumed to have an external process managing + its endpoints, which Kubernetes will not modify. Only applies + to types ClusterIP, NodePort, and LoadBalancer. Ignored + if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/' + type: object + sessionAffinity: + description: 'Supports "ClientIP" and "None". Used to maintain + session affinity. Enable client IP based session affinity. + Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations of Client + IP based session affinity. + properties: + timeoutSeconds: + description: timeoutSeconds specifies the seconds + of ClientIP type session sticky time. The value + must be >0 && <=86400(for 1 day) if ServiceAffinity + == "ClientIP". Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + topologyKeys: + description: topologyKeys is a preference-order list of topology + keys which implementations of services should use to preferentially + sort endpoints when accessing this Service, it can not be + used at the same time as externalTrafficPolicy=Local. Topology + keys must be valid label keys and at most 16 keys may be + specified. 
Endpoints are chosen based on the first topology + key with available backends. If this field is specified + and all entries have no backends that match the topology + of the client, the service has no backends for that client + and connections should fail. The special value "*" may be + used to mean "any topology". This catch-all value, if used, + only makes sense as the last value in the list. If this + is not specified or empty, no topology constraints will + be applied. + items: + type: string + type: array + type: + description: 'type determines how the Service is exposed. + Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, + NodePort, and LoadBalancer. "ExternalName" maps to the specified + externalName. "ClusterIP" allocates a cluster-internal IP + address for load-balancing to endpoints. Endpoints are determined + by the selector or if that is not specified, by manual construction + of an Endpoints object. If clusterIP is "None", no virtual + IP is allocated and the endpoints are published as a set + of endpoints rather than a stable IP. "NodePort" builds + on ClusterIP and allocates a port on every node which routes + to the clusterIP. "LoadBalancer" builds on NodePort and + creates an external load-balancer (if supported in the current + cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types' + type: string + type: object + status: + description: 'Most recently observed status of the service. Populated + by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + loadBalancer: + description: LoadBalancer contains the current status of the + load-balancer, if one is present. + properties: + ingress: + description: Ingress is a list containing ingress points + for the load-balancer. Traffic intended for the service + should be sent to these ingress points. + items: + description: 'LoadBalancerIngress represents the status + of a load-balancer ingress point: traffic intended + for the service should be sent to an ingress point.' + properties: + hostname: + description: Hostname is set for load-balancer ingress + points that are DNS based (typically AWS load-balancers) + type: string + ip: + description: IP is set for load-balancer ingress + points that are IP based (typically GCE or OpenStack + load-balancers) + type: string + type: object + type: array + type: object + type: object + type: object + tolerations: + description: Pod's tolerations for Kubernetes node's taint + items: + description: The pod this Toleration is attached to tolerates any + taint that matches the triple using the matching + operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match all + values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod + can tolerate all taints of a particular category. 
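+ # --- Illustrative note (not generated content): a tolerations sketch
+ # using the fields described here; the taint key and value are
+ # hypothetical. With tolerationSeconds omitted, the taint is
+ # tolerated indefinitely.
+ #   tolerations:
+ #     - key: splunk/dedicated
+ #       operator: Equal
+ #       value: searchhead
+ #       effect: NoSchedule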
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, it + is not set, which means tolerate the taint forever (do not + evict). Zero and negative values will be treated as 0 (evict + immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + type: object + type: array + varVolumeStorageConfig: + description: Storage configuration for /opt/splunk/var volume + properties: + ephemeralStorage: + description: If true, ephemeral (emptyDir) storage will be used + type: boolean + storageCapacity: + description: Storage capacity to request persistent volume claims + (default="10Gi" for etc and "100Gi" for var) + type: string + storageClassName: + description: Name of StorageClass to use for persistent volume + claims + type: string + type: object + volumes: + description: List of one or more Kubernetes volumes. These will be + mounted in all pod containers as /mnt/<name> + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the default + is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on + the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI of the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
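+          # Editor's note (illustrative, not generated): the varVolumeStorageConfig
+          # block above is typically set on the CR spec like this; the storage class
+          # name "gp2" is a hypothetical example.
+          #
+          #   spec:
+          #     varVolumeStorageConfig:
+          #       storageClassName: gp2
+          #       storageCapacity: 200Gi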
+ type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per storage + account Managed: azure managed data disk (only in managed + availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that + shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring + for User, default is /etc/ceph/user.secret More info: + https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication + secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate + this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the ConfigMap, the volume setup will error unless it is + marked optional. Paths must be relative and may not contain + the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must + be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage + that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles + this volume. Consult with your admin for the correct name + as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the + volume. 
Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod + that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this + directory. The default is "" which means to use the node''s + default medium. Must be an empty string (default) or Memory. + More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. 
The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to the + pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' 
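+          # Editor's note (illustrative, not generated): a typical entry in the CR's
+          # `volumes` list, which the operator mounts at /mnt/<name>; the ConfigMap
+          # name below is hypothetical.
+          #
+          #   spec:
+          #     volumes:
+          #     - name: licenses
+          #       configMap:
+          #         name: splunk-licenses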
+ properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface <target portal>:<volume name> will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server.
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. 
This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. 
+ properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours. Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to. Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend. Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to. Defaults to serviceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs".
Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. 
+ type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. 
Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: SearchHeadClusterStatus defines the observed state of a Splunk + Enterprise search head cluster + properties: + adminPasswordChangedSecrets: + additionalProperties: + type: boolean + description: Holds secrets whose admin password has changed + type: object + adminSecretChangedFlag: + description: Indicates when the admin password has been changed for + a peer + items: + type: boolean + type: array + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. 
Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represents the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes. This is introduced here so that we don't do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. + format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object + captain: + description: name or label of the search head captain + type: string + captainReady: + description: true if the search head cluster's captain is ready to + service requests + type: boolean + deployerPhase: + description: current phase of the deployer + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + initialized: + description: true if the search head cluster has finished initialization + type: boolean + maintenanceMode: + description: true if the search head cluster is in maintenance mode + type: boolean + members: + description: status of each search head cluster member + items: + description: SearchHeadClusterMemberStatus is used to track the + status of each search head cluster member + properties: + active_historical_search_count: + description: Number of currently running historical searches. + type: integer + active_realtime_search_count: + description: Number of currently running realtime searches. + type: integer + adhoc_searchhead: + description: Flag that indicates if this member can run scheduled + searches. + type: boolean + is_registered: + description: Indicates if this member is registered with the + search head cluster captain. + type: boolean + name: + description: Name of the search head cluster member + type: string + status: + description: Indicates the status of the member.
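+          # Editor's note (illustrative, not generated): an appRepo spec that would
+          # drive the app framework status fields above; bucket, volume, and secret
+          # names are hypothetical.
+          #
+          #   spec:
+          #     appRepo:
+          #       appsRepoPollIntervalSeconds: 900
+          #       defaults:
+          #         volumeName: volume_app_repo
+          #         scope: local
+          #       appSources:
+          #       - name: adminApps
+          #         location: adminAppsLoc/
+          #       volumes:
+          #       - name: volume_app_repo
+          #         storageType: s3
+          #         provider: aws
+          #         path: bucket-app-framework/
+          #         endpoint: https://s3-us-west-2.amazonaws.com
+          #         secretRef: s3-secret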
+ type: string + type: object + type: array + minPeersJoined: + description: true if the minimum number of search head cluster members + have joined + type: boolean + namespace_scoped_secret_resource_version: + description: Indicates resource version of namespace scoped secret + type: string + phase: + description: current phase of the search head cluster + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready search head cluster members + format: int32 + type: integer + replicas: + description: desired number of search head cluster members + format: int32 + type: integer + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + shcSecretChangedFlag: + description: Indicates when the shc_secret has been changed for a + peer + items: + type: boolean + type: array + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1beta1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha3 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha2 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string diff --git a/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_standalones_crd.yaml b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_standalones_crd.yaml new file mode 100644 index 000000000..e31b397ac --- /dev/null +++ b/deploy/olm-catalog/splunk/1.0.2/enterprise.splunk.com_standalones_crd.yaml @@ -0,0 +1,2971 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: standalones.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: Standalone + listKind: StandaloneList + plural: standalones + shortNames: + - stdaln + singular: standalone + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of standalone instances + jsonPath: .status.phase + name: Phase + type: string + - description: Number of desired standalone instances + jsonPath: .status.replicas + name: Desired + type: integer + - description: Current number of ready standalone instances + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Age of standalone resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v2 + schema: + openAPIV3Schema: + description: Standalone is the Schema for a Splunk Enterprise standalone instances. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: StandaloneSpec defines the desired state of a Splunk Enterprise + standalone instances. + properties: + Mock: + description: Mock to differentiate between UTs and actual reconcile + type: boolean + affinity: + description: Kubernetes Affinity rules that control how pods are assigned + to particular nodes. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. The node that is most preferred + is the one with the greatest sum of weights, i.e. for each + node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, + etc.), compute a sum by iterating through the elements of + this field and adding "weight" to the sum if the node matches + the corresponding matchExpressions; the node(s) with the + highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches + all objects with implicit weight 0 (i.e. it's a no-op). + A null preferred scheduling term matches no objects (i.e. + is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. 
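+          # Editor's note (illustrative, not generated): a preferred node-affinity
+          # rule built from the terms above; the label key and value are hypothetical.
+          #
+          #   spec:
+          #     affinity:
+          #       nodeAffinity:
+          #         preferredDuringSchedulingIgnoredDuringExecution:
+          #         - weight: 50
+          #           preference:
+          #             matchExpressions:
+          #             - key: kubernetes.io/arch
+          #               operator: In
+          #               values: ["amd64"]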
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this + field are not met at scheduling time, the pod will not be + scheduled onto the node. If the affinity requirements specified + by this field cease to be met at some point during pod execution + (e.g. due to an update), the system may or may not try to + eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: A null or empty node selector term matches + no objects. The requirements of them are ANDed. The + TopologySelectorTerm type implements a subset of the + NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: A node selector requirement is a + selector that contains values, a key, and an + operator that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: Represents a key's relationship + to a set of values. Valid operators are + In, NotIn, Exists, DoesNotExist. Gt, and + Lt. + type: string + values: + description: An array of string values. If + the operator is In or NotIn, the values + array must be non-empty. If the operator + is Exists or DoesNotExist, the values array + must be empty. If the operator is Gt or + Lt, the values array must have a single + element, which will be interpreted as an + integer. This array is replaced during a + strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to + nodes that satisfy the affinity expressions specified by + this field, but it may choose a node that violates one or + more of the expressions. 
+                          The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+                        items:
+                          description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+                          properties:
+                            podAffinityTerm:
+                              description: Required. A pod affinity term, associated with the corresponding weight.
+                              properties:
+                                labelSelector:
+                                  description: A label query over a set of resources, in this case pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                      items:
+                                        description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                namespaces:
+                                  description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+                                  items:
+                                    type: string
+                                  type: array
+                                topologyKey:
+                                  description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                        items:
+                          description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+                          properties:
+                            labelSelector:
+                              description: A label query over a set of resources, in this case pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                  type: object
+                              type: object
+                            namespaces:
+                              description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                  podAntiAffinity:
+                    description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+                        items:
+                          description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+                          properties:
+                            podAffinityTerm:
+                              description: Required.
+                                A pod affinity term, associated with the corresponding weight.
+                              properties:
+                                labelSelector:
+                                  description: A label query over a set of resources, in this case pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                      items:
+                                        description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                namespaces:
+                                  description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+                                  items:
+                                    type: string
+                                  type: array
+                                topologyKey:
+                                  description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
+                        items:
+                          description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running
+                          properties:
+                            labelSelector:
+                              description: A label query over a set of resources, in this case pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                  type: object
+                              type: object
+                            namespaces:
+                              description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                type: object
+              appRepo:
+                description: Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management
+                properties:
+                  appSources:
+                    description: List of App sources on remote storage
+                    items:
+                      description: AppSourceSpec defines list of App package (*.spl, *.tgz) locations on remote volumes
+                      properties:
+                        location:
+                          description: Location relative to the volume path
+                          type: string
+                        name:
+                          description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo
+                          type: string
+                        scope:
+                          description: 'Scope of the App deployment: cluster, local. Scope determines whether the App(s) is/are installed locally or cluster-wide'
+                          type: string
+                        volumeName:
+                          description: Remote Storage Volume name
+                          type: string
+                      type: object
+                    type: array
+                  appsRepoPollIntervalSeconds:
+                    description: Interval in seconds to check the Remote Storage for App changes. The default value for this config is 1 hour(3600 sec), minimum value is 1 minute(60sec) and maximum value is 1 day(86400 sec). We assign the value based on following conditions - 1. If no value or 0 is specified then it will be defaulted to 1 hour. 2. If anything less than min is specified then we set it to 1 min. 3. If anything more than the max value is specified then we set it to 1 day.
+                    format: int64
+                    type: integer
+                  defaults:
+                    description: Defines the default configuration settings for App sources
+                    properties:
+                      scope:
+                        description: 'Scope of the App deployment: cluster, local.
+                          Scope determines whether the App(s) is/are installed locally or cluster-wide'
+                        type: string
+                      volumeName:
+                        description: Remote Storage Volume name
+                        type: string
+                    type: object
+                  volumes:
+                    description: List of remote storage volumes
+                    items:
+                      description: VolumeSpec defines remote volume config
+                      properties:
+                        endpoint:
+                          description: Remote volume URI
+                          type: string
+                        name:
+                          description: Remote volume name
+                          type: string
+                        path:
+                          description: Remote volume path
+                          type: string
+                        provider:
+                          description: 'App Package Remote Store provider. Supported values: aws, minio'
+                          type: string
+                        secretRef:
+                          description: Secret object name
+                          type: string
+                        storageType:
+                          description: 'Remote Storage type. Supported values: s3'
+                          type: string
+                      type: object
+                    type: array
+                type: object
+              clusterMasterRef:
+                description: ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes
+                properties:
+                  apiVersion:
+                    description: API version of the referent.
+                    type: string
+                  fieldPath:
+                    description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
+                    type: string
+                  kind:
+                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                    type: string
+                  name:
+                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                    type: string
+                  namespace:
+                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                    type: string
+                  resourceVersion:
+                    description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                    type: string
+                  uid:
+                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    type: string
+                type: object
+              defaults:
+                description: Inline map of default.yml overrides used to initialize the environment
+                type: string
+              defaultsUrl:
+                description: Full path or URL for one or more default.yml files, separated by commas
+                type: string
+              defaultsUrlApps:
+                description: Full path or URL for one or more defaults.yml files specific to App install, separated by commas. The defaults listed here will be installed on the CM, standalone, search head deployer or license master instance.
+                type: string
+              etcVolumeStorageConfig:
+                description: Storage configuration for /opt/splunk/etc volume
+                properties:
+                  ephemeralStorage:
+                    description: If true, ephemeral (emptyDir) storage will be used
+                    type: boolean
+                  storageCapacity:
+                    description: Storage capacity to request persistent volume claims (default=”10Gi” for etc and "100Gi" for var)
+                    type: string
+                  storageClassName:
+                    description: Name of StorageClass to use for persistent volume claims
+                    type: string
+                type: object
+              extraEnv:
+                description: 'ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers WARNING: Setting environment variables used by Splunk or Ansible will affect Splunk installation and operation'
+                items:
+                  description: EnvVar represents an environment variable present in a Container.
+                  properties:
+                    name:
+                      description: Name of the environment variable. Must be a C_IDENTIFIER.
+                      type: string
+                    value:
+                      description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".'
+                      type: string
+                    valueFrom:
+                      description: Source for the environment variable's value. Cannot be used if value is not empty.
+                      properties:
+                        configMapKeyRef:
+                          description: Selects a key of a ConfigMap.
+                          properties:
+                            key:
+                              description: The key to select.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                            optional:
+                              description: Specify whether the ConfigMap or its key must be defined
+                              type: boolean
+                          required:
+                          - key
+                          type: object
+                        fieldRef:
+                          description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.'
+                          properties:
+                            apiVersion:
+                              description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+                              type: string
+                            fieldPath:
+                              description: Path of the field to select in the specified API version.
+                              type: string
+                          required:
+                          - fieldPath
+                          type: object
+                        resourceFieldRef:
+                          description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.'
+                          properties:
+                            containerName:
+                              description: 'Container name: required for volumes, optional for env vars'
+                              type: string
+                            divisor:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              description: Specifies the output format of the exposed resources, defaults to "1"
+                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                              x-kubernetes-int-or-string: true
+                            resource:
+                              description: 'Required: resource to select'
+                              type: string
+                          required:
+                          - resource
+                          type: object
+                        secretKeyRef:
+                          description: Selects a key of a secret in the pod's namespace
+                          properties:
+                            key:
+                              description: The key of the secret to select from. Must be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                            optional:
+                              description: Specify whether the Secret or its key must be defined
+                              type: boolean
+                          required:
+                          - key
+                          type: object
+                      type: object
+                  required:
+                  - name
+                  type: object
+                type: array
+              image:
+                description: Image to use for Splunk pod containers (overrides RELATED_IMAGE_SPLUNK_ENTERPRISE environment variables)
+                type: string
+              imagePullPolicy:
+                description: 'Sets pull policy for all images (either “Always” or the default: “IfNotPresent”)'
+                enum:
+                - Always
+                - IfNotPresent
+                type: string
+              licenseMasterRef:
+                description: LicenseMasterRef refers to a Splunk Enterprise license master managed by the operator within Kubernetes
+                properties:
+                  apiVersion:
+                    description: API version of the referent.
+                    type: string
+                  fieldPath:
+                    description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
+                    type: string
+                  kind:
+                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                    type: string
+                  name:
+                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+                    type: string
+                  namespace:
+                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+                    type: string
+                  resourceVersion:
+                    description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+                    type: string
+                  uid:
+                    description: 'UID of the referent.
+                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+                    type: string
+                type: object
+              licenseUrl:
+                description: Full path or URL for a Splunk Enterprise license file
+                type: string
+              livenessInitialDelaySeconds:
+                description: 'LivenessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) for the Liveness probe Note: If needed, Operator overrides with a higher value'
+                format: int32
+                type: integer
+              readinessInitialDelaySeconds:
+                description: 'ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe Note: If needed, Operator overrides with a higher value'
+                format: int32
+                type: integer
+              replicas:
+                description: Number of standalone pods
+                format: int32
+                type: integer
+              resources:
+                description: resource requirements for the pod containers
+                properties:
+                  limits:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                    type: object
+                  requests:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                    type: object
+                type: object
+              schedulerName:
+                description: Name of Scheduler to use for pod placement (defaults to “default-scheduler”)
+                type: string
+              serviceAccount:
+                description: ServiceAccount is the service account used by the pods deployed by the CRD. If not specified uses the default serviceAccount for the namespace as per https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server
+                type: string
+              serviceTemplate:
+                description: ServiceTemplate is a template used to create Kubernetes services
+                properties:
+                  apiVersion:
+                    description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+                    type: string
+                  kind:
+                    description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+                    type: string
+                  metadata:
+                    description: 'Standard object''s metadata.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata'
+                    type: object
+                  spec:
+                    description: Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+                    properties:
+                      clusterIP:
+                        description: 'clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are "None", empty string (""), or a valid IP address. "None" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies'
+                        type: string
+                      externalIPs:
+                        description: externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.
+                        items:
+                          type: string
+                        type: array
+                      externalName:
+                        description: externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.
+                        type: string
+                      externalTrafficPolicy:
+                        description: externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. "Local" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. "Cluster" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.
+                        type: string
+                      healthCheckNodePort:
+                        description: healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only takes effect when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.
+                        format: int32
+                        type: integer
+                      ipFamily:
+                        description: ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.
+                        type: string
+                      loadBalancerIP:
+                        description: 'Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field.
+                          This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.'
+                        type: string
+                      loadBalancerSourceRanges:
+                        description: 'If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/'
+                        items:
+                          type: string
+                        type: array
+                      ports:
+                        description: 'The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies'
+                        items:
+                          description: ServicePort contains information on service's port.
+                          properties:
+                            appProtocol:
+                              description: The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.
+                              type: string
+                            name:
+                              description: The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.
+                              type: string
+                            nodePort:
+                              description: 'The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport'
+                              format: int32
+                              type: integer
+                            port:
+                              description: The port that will be exposed by this service.
+                              format: int32
+                              type: integer
+                            protocol:
+                              description: The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". Default is TCP.
+                              type: string
+                            targetPort:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              description: 'Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod''s container ports. If this is not specified, the value of the ''port'' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service'
+                              x-kubernetes-int-or-string: true
+                          required:
+                          - port
+                          - protocol
+                          type: object
+                        type: array
+                        x-kubernetes-list-map-keys:
+                        - port
+                        - protocol
+                        x-kubernetes-list-type: map
+                      publishNotReadyAddresses:
+                        description: publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false.
+                          The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.
+                        type: boolean
+                      selector:
+                        additionalProperties:
+                          type: string
+                        description: 'Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/'
+                        type: object
+                      sessionAffinity:
+                        description: 'Supports "ClientIP" and "None". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies'
+                        type: string
+                      sessionAffinityConfig:
+                        description: sessionAffinityConfig contains the configurations of session affinity.
+                        properties:
+                          clientIP:
+                            description: clientIP contains the configurations of Client IP based session affinity.
+                            properties:
+                              timeoutSeconds:
+                                description: timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". Default value is 10800(for 3 hours).
+                                format: int32
+                                type: integer
+                            type: object
+                        type: object
+                      topologyKeys:
+                        description: topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value "*" may be used to mean "any topology". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.
+                        items:
+                          type: string
+                        type: array
+                      type:
+                        description: 'type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. "ExternalName" maps to the specified externalName. "ClusterIP" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is "None", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. "NodePort" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. "LoadBalancer" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types'
+                        type: string
+                    type: object
+                  status:
+                    description: 'Most recently observed status of the service. Populated by the system. Read-only.
+                      More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status'
+                    properties:
+                      loadBalancer:
+                        description: LoadBalancer contains the current status of the load-balancer, if one is present.
+                        properties:
+                          ingress:
+                            description: Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.
+                            items:
+                              description: 'LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.'
+                              properties:
+                                hostname:
+                                  description: Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)
+                                  type: string
+                                ip:
+                                  description: IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)
+                                  type: string
+                              type: object
+                            type: array
+                        type: object
+                    type: object
+                type: object
+              smartstore:
+                description: Splunk Smartstore configuration. Refer to indexes.conf.spec and server.conf.spec on docs.splunk.com
+                properties:
+                  cacheManager:
+                    description: Defines Cache manager settings
+                    properties:
+                      evictionPadding:
+                        description: Additional size beyond 'minFreeSize' before eviction kicks in
+                        type: integer
+                      evictionPolicy:
+                        description: Eviction policy to use
+                        type: string
+                      hotlistBloomFilterRecencyHours:
+                        description: Time period relative to the bucket's age, during which the bloom filter file is protected from cache eviction
+                        type: integer
+                      hotlistRecencySecs:
+                        description: Time period relative to the bucket's age, during which the bucket is protected from cache eviction
+                        type: integer
+                      maxCacheSize:
+                        description: Max cache size per partition
+                        type: integer
+                      maxConcurrentDownloads:
+                        description: Maximum number of buckets that can be downloaded from remote storage in parallel
+                        type: integer
+                      maxConcurrentUploads:
+                        description: Maximum number of buckets that can be uploaded to remote storage in parallel
+                        type: integer
+                    type: object
+                  defaults:
+                    description: Default configuration for indexes
+                    properties:
+                      maxGlobalDataSizeMB:
+                        description: MaxGlobalDataSizeMB defines the maximum amount of space for warm and cold buckets of an index
+                        type: integer
+                      maxGlobalRawDataSizeMB:
+                        description: MaxGlobalRawDataSizeMB defines the maximum amount of cumulative space for warm and cold buckets of an index
+                        type: integer
+                      volumeName:
+                        description: Remote Volume name
+                        type: string
+                    type: object
+                  indexes:
+                    description: List of Splunk indexes
+                    items:
+                      description: IndexSpec defines Splunk index name and storage path
+                      properties:
+                        hotlistBloomFilterRecencyHours:
+                          description: Time period relative to the bucket's age, during which the bloom filter file is protected from cache eviction
+                          type: integer
+                        hotlistRecencySecs:
+                          description: Time period relative to the bucket's age, during which the bucket is protected from cache eviction
+                          type: integer
+                        maxGlobalDataSizeMB:
+                          description: MaxGlobalDataSizeMB defines the maximum amount of space for warm and cold buckets of an index
+                          type: integer
+                        maxGlobalRawDataSizeMB:
+                          description: MaxGlobalRawDataSizeMB defines the maximum amount of cumulative space for warm and cold buckets of an index
+                          type: integer
+                        name:
+                          description: Splunk index name
+                          type: string
+                        remotePath:
+                          description: Index location relative to the remote volume path
+                          type: string
+                        volumeName:
+                          description: Remote Volume name
+                          type: string
+                      type: object
+                    type: array
+                  volumes:
+                    description: List of remote storage volumes
+                    items:
+                      description: VolumeSpec defines remote volume config
+                      properties:
+                        endpoint:
+                          description: Remote volume URI
+                          type: string
+                        name:
+                          description: Remote volume name
+                          type: string
+                        path:
+                          description: Remote volume path
+                          type: string
+                        provider:
+                          description: 'App Package Remote Store provider. Supported values: aws, minio'
+                          type: string
+                        secretRef:
+                          description: Secret object name
+                          type: string
+                        storageType:
+                          description: 'Remote Storage type. Supported values: s3'
+                          type: string
+                      type: object
+                    type: array
+                type: object
+              tolerations:
+                description: Pod's tolerations for Kubernetes node's taint
+                items:
+                  description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+                  properties:
+                    effect:
+                      description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                      type: string
+                    key:
+                      description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                      type: string
+                    operator:
+                      description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+                      type: string
+                    tolerationSeconds:
+                      description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+                      format: int64
+                      type: integer
+                    value:
+                      description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+                      type: string
+                  type: object
+                type: array
+              varVolumeStorageConfig:
+                description: Storage configuration for /opt/splunk/var volume
+                properties:
+                  ephemeralStorage:
+                    description: If true, ephemeral (emptyDir) storage will be used
+                    type: boolean
+                  storageCapacity:
+                    description: Storage capacity to request persistent volume claims (default=”10Gi” for etc and "100Gi" for var)
+                    type: string
+                  storageClassName:
+                    description: Name of StorageClass to use for persistent volume claims
+                    type: string
+                type: object
+              volumes:
+                description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as /mnt/<name>
+                items:
+                  description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
+                  properties:
+                    awsElasticBlockStore:
+                      description: 'AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
+                      properties:
+                        fsType:
+                          description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                            More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine'
+                          type: string
+                        partition:
+                          description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).'
+                          format: int32
+                          type: integer
+                        readOnly:
+                          description: 'Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
+                          type: boolean
+                        volumeID:
+                          description: 'Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
+                          type: string
+                      required:
+                      - volumeID
+                      type: object
+                    azureDisk:
+                      description: AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+                      properties:
+                        cachingMode:
+                          description: 'Host Caching mode: None, Read Only, Read Write.'
+                          type: string
+                        diskName:
+                          description: The Name of the data disk in the blob storage
+                          type: string
+                        diskURI:
+                          description: The URI the data disk in the blob storage
+                          type: string
+                        fsType:
+                          description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                          type: string
+                        kind:
+                          description: 'Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared'
+                          type: string
+                        readOnly:
+                          description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
+                          type: boolean
+                      required:
+                      - diskName
+                      - diskURI
+                      type: object
+                    azureFile:
+                      description: AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
+                      properties:
+                        readOnly:
+                          description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
+                          type: boolean
+                        secretName:
+                          description: the name of secret that contains Azure Storage Account Name and Key
+                          type: string
+                        shareName:
+                          description: Share Name
+                          type: string
+                      required:
+                      - secretName
+                      - shareName
+                      type: object
+                    cephfs:
+                      description: CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+                      properties:
+                        monitors:
+                          description: 'Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
+                          items:
+                            type: string
+                          type: array
+                        path:
+                          description: 'Optional: Used as the mounted root, rather than the full Ceph tree, default is /'
+                          type: string
+                        readOnly:
+                          description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
+                          type: boolean
+                        secretFile:
+                          description: 'Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
+                          type: string
+                        secretRef:
+                          description: 'Optional: SecretRef is reference to the authentication secret for User, default is empty.
+                            More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
+                          properties:
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          type: object
+                        user:
+                          description: 'Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it'
+                          type: string
+                      required:
+                      - monitors
+                      type: object
+                    cinder:
+                      description: 'Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
+                      properties:
+                        fsType:
+                          description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
+                          type: string
+                        readOnly:
+                          description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
+                          type: boolean
+                        secretRef:
+                          description: 'Optional: points to a secret object containing parameters used to connect to OpenStack.'
+                          properties:
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          type: object
+                        volumeID:
+                          description: 'volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md'
+                          type: string
+                      required:
+                      - volumeID
+                      type: object
+                    configMap:
+                      description: ConfigMap represents a configMap that should populate this volume
+                      properties:
+                        defaultMode:
+                          description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
+                          format: int32
+                          type: integer
+                        items:
+                          description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.
+                          items:
+                            description: Maps a string key to a path within a volume.
+                            properties:
+                              key:
+                                description: The key to project.
+                                type: string
+                              mode:
+                                description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
+                                format: int32
+                                type: integer
+                              path:
+                                description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
+                                type: string
+                            required:
+                            - key
+                            - path
+                            type: object
+                          type: array
+                        name:
+                          description: 'Name of the referent.
+                            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                        optional:
+                          description: Specify whether the ConfigMap or its keys must be defined
+                          type: boolean
+                      type: object
+                    csi:
+                      description: CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).
+                      properties:
+                        driver:
+                          description: Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.
+                          type: string
+                        fsType:
+                          description: Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.
+                          type: string
+                        nodePublishSecretRef:
+                          description: NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.
+                          properties:
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                          type: object
+                        readOnly:
+                          description: Specifies a read-only configuration for the volume. Defaults to false (read/write).
+                          type: boolean
+                        volumeAttributes:
+                          additionalProperties:
+                            type: string
+                          description: VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.
+                          type: object
+                      required:
+                      - driver
+                      type: object
+                    downwardAPI:
+                      description: DownwardAPI represents downward API about the pod that should populate this volume
+                      properties:
+                        defaultMode:
+                          description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
+                          format: int32
+                          type: integer
+                        items:
+                          description: Items is a list of downward API volume file
+                          items:
+                            description: DownwardAPIVolumeFile represents information to create the file containing the pod field
+                            properties:
+                              fieldRef:
+                                description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.'
+                                properties:
+                                  apiVersion:
+                                    description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+                                    type: string
+                                  fieldPath:
+                                    description: Path of the field to select in the specified API version.
+                                    type: string
+                                required:
+                                - fieldPath
+                                type: object
+                              mode:
+                                description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.'
+                                format: int32
+                                type: integer
+                              path:
+                                description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded.
+                                  The first item of the relative path must not start with ''..'''
+                                type: string
+                              resourceFieldRef:
+                                description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.'
+                                properties:
+                                  containerName:
+                                    description: 'Container name: required for volumes, optional for env vars'
+                                    type: string
+                                  divisor:
+                                    anyOf:
+                                    - type: integer
+                                    - type: string
+                                    description: Specifies the output format of the exposed resources, defaults to "1"
+                                    pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                    x-kubernetes-int-or-string: true
+                                  resource:
+                                    description: 'Required: resource to select'
+                                    type: string
+                                required:
+                                - resource
+                                type: object
+                            required:
+                            - path
+                            type: object
+                          type: array
+                      type: object
+                    emptyDir:
+                      description: 'EmptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
+                      properties:
+                        medium:
+                          description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir'
+                          type: string
+                        sizeLimit:
+                          anyOf:
+                          - type: integer
+                          - type: string
+                          description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir'
+                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                          x-kubernetes-int-or-string: true
+                      type: object
+                    fc:
+                      description: FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+                      properties:
+                        fsType:
+                          description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine'
+                          type: string
+                        lun:
+                          description: 'Optional: FC target lun number'
+                          format: int32
+                          type: integer
+                        readOnly:
+                          description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.'
+                          type: boolean
+                        targetWWNs:
+                          description: 'Optional: FC target worldwide names (WWNs)'
+                          items:
+                            type: string
+                          type: array
+                        wwids:
+                          description: 'Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.'
+                          items:
+                            type: string
+                          type: array
+                      type: object
+                    flexVolume:
+                      description: FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.
+                      properties:
+                        driver:
+                          description: Driver is the name of the driver to use for this volume.
+                          type: string
+                        fsType:
+                          description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret + object containing sensitive information to pass to the + plugin scripts. This may be empty if no secret object + is specified. If the secret object contains more than + one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to + a kubelet's host machine. This depends on the Flocker control + service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name + on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir into + the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. 
+ type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details + Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: + https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More + info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is + attached to a kubelet''s host machine and then exposed to + the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new iSCSI + interface : will be created + for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). 
+ items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an + IP or ip_addr:port if the port is other than default (typically + TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares + a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More + info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to + be mounted with read-only permissions. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the + NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a + reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. More + info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating + system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within + the path are not affected by this setting. This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits + set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with + other supported volume types + properties: + configMap: + description: information about the configMap data + to project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the ConfigMap, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data + to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' 
+ format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' + path. Must be utf-8 encoded. The first + item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu + and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to + project + properties: + items: + description: If unspecified, each key-value pair + in the Data field of the referenced Secret will + be projected into the volume as a file whose + name is the key and content is the value. If + specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified which + is not present in the Secret, the volume setup + will error unless it is marked optional. Paths + must be relative and may not contain the '..' + path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might be + in conflict with other options that affect + the file mode, like fsGroup, and the result + can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file + to map the key to. May not be an absolute + path. May not contain the path element + '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken + data to project + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must identify + itself with an identifier specified in the audience + of the token, and otherwise should reject the + token. The audience defaults to the identifier + of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, the + kubelet volume plugin will proactively rotate + the service account token. 
The kubelet will + start trying to rotate the token if the token + is older than 80 percent of its time to live + or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the + mount point of the file to project the token + into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no + group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte + Registry services specified as a string as host:port pair + (multiple entries are separated with commas) which acts + as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the + Backend Used with dynamically provisioned Quobyte volumes, + value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount + user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the + host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret + for RBDUser. If provided overrides keyring. Default is + nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. 
More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for + the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not provided, + Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with + Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the + protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and the + result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and content + is the value. If specified, the listed keys will be projected + into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in + the Secret, the volume setup will error unless it is marked + optional. Paths must be relative and may not contain the + '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, + must be a value between 0 and 0777. If not specified, + the volume defaultMode will be used. This might + be in conflict with other options that affect the + file mode, like fsGroup, and the result can be other + mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map + the key to. May not be an absolute path. 
May not + contain the path element '..'. May not start with + the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here + will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining + the StorageOS API credentials. If not specified, default + values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the + StorageOS volume. Volume names are only unique within + a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name to + override the default behaviour. Set to "default" if you + are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: StandaloneStatus defines the observed state of a Splunk Enterprise + standalone instances. + properties: + appContext: + description: App Framework Context + properties: + appRepo: + description: List of App package (*.spl, *.tgz) locations on remote + volume + properties: + appSources: + description: List of App sources on remote storage + items: + description: AppSourceSpec defines list of App package (*.spl, + *.tgz) locations on remote volumes + properties: + location: + description: Location relative to the volume path + type: string + name: + description: Logical name for the set of apps placed + in this location. 
Logical name must be unique to the + appRepo + type: string + scope: + description: 'Scope of the App deployment: cluster, + local. Scope determines whether the App(s) is/are + installed locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + type: array + appsRepoPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage + for App changes. The default value for this config is 1 + hour(3600 sec), minimum value is 1 minute(60sec) and maximum + value is 1 day(86400 sec). We assign the value based on + following conditions - 1. If no value or 0 is specified + then it will be defaulted to 1 hour. 2. If anything less + than min is specified then we set it to 1 min. 3. If + anything more than the max value is specified then we set + it to 1 day. + format: int64 + type: integer + defaults: + description: Defines the default configuration settings for + App sources + properties: + scope: + description: 'Scope of the App deployment: cluster, local. + Scope determines whether the App(s) is/are installed + locally or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3' + type: string + type: object + type: array + type: object + appSrcDeployStatus: + additionalProperties: + description: AppSrcDeployInfo represents deployment info for + list of Apps + properties: + appDeploymentInfo: + items: + description: AppDeploymentInfo represents a single App + deployment information + properties: + Size: + format: int64 + type: integer + appName: + type: string + deployStatus: + description: AppDeploymentStatus represents the status + of an App on the Pod + type: integer + lastModifiedTime: + type: string + objectHash: + type: string + repoState: + description: AppRepoState represent the App state + on remote store + type: integer + type: object + type: array + type: object + description: Represents the Apps deployment status + type: object + appsRepoStatusPollIntervalSeconds: + description: Interval in seconds to check the Remote Storage for + App changes This is introduced here so that we dont do spec + validation in every reconcile just because the spec and status + are different. + format: int64 + type: integer + isDeploymentInProgress: + description: IsDeploymentInProgress indicates if the Apps deployment + is in progress + type: boolean + lastAppInfoCheckTime: + description: This is set to the time when we get the list of apps + from remote storage. 
+ format: int64 + type: integer + version: + description: App Framework version info for future use + type: integer + type: object + phase: + description: current phase of the standalone instances + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + readyReplicas: + description: current number of ready standalone instances + format: int32 + type: integer + replicas: + description: number of desired standalone instances + format: int32 + type: integer + resourceRevMap: + additionalProperties: + type: string + description: Resource Revision tracker + type: object + selector: + description: selector for pods, used by HorizontalPodAutoscaler + type: string + smartstore: + description: Splunk Smartstore configuration. Refer to indexes.conf.spec + and server.conf.spec on docs.splunk.com + properties: + cacheManager: + description: Defines Cache manager settings + properties: + evictionPadding: + description: Additional size beyond 'minFreeSize' before eviction + kicks in + type: integer + evictionPolicy: + description: Eviction policy to use + type: string + hotlistBloomFilterRecencyHours: + description: Time period relative to the bucket's age, during + which the bloom filter file is protected from cache eviction + type: integer + hotlistRecencySecs: + description: Time period relative to the bucket's age, during + which the bucket is protected from cache eviction + type: integer + maxCacheSize: + description: Max cache size per partition + type: integer + maxConcurrentDownloads: + description: Maximum number of buckets that can be downloaded + from remote storage in parallel + type: integer + maxConcurrentUploads: + description: Maximum number of buckets that can be uploaded + to remote storage in parallel + type: integer + type: object + defaults: + description: Default configuration for indexes + properties: + maxGlobalDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of space for warm and cold buckets of an index + type: integer + maxGlobalRawDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of cumulative space for warm and cold buckets of an index + type: integer + volumeName: + description: Remote Volume name + type: string + type: object + indexes: + description: List of Splunk indexes + items: + description: IndexSpec defines Splunk index name and storage + path + properties: + hotlistBloomFilterRecencyHours: + description: Time period relative to the bucket's age, during + which the bloom filter file is protected from cache eviction + type: integer + hotlistRecencySecs: + description: Time period relative to the bucket's age, during + which the bucket is protected from cache eviction + type: integer + maxGlobalDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of space for warm and cold buckets of an index + type: integer + maxGlobalRawDataSizeMB: + description: MaxGlobalDataSizeMB defines the maximum amount + of cumulative space for warm and cold buckets of an index + type: integer + name: + description: Splunk index name + type: string + remotePath: + description: Index location relative to the remote volume + path + type: string + volumeName: + description: Remote Volume name + type: string + type: object + type: array + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote 
volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3' + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1beta1 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha3 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string + - name: v1alpha2 + served: true + storage: false + schema: + openAPIV3Schema: + type: object + properties: + apiVersion: + type: string diff --git a/deploy/olm-catalog/splunk/1.0.2/splunk.v1.0.2.clusterserviceversion.yaml b/deploy/olm-catalog/splunk/1.0.2/splunk.v1.0.2.clusterserviceversion.yaml new file mode 100644 index 000000000..6a70343fb --- /dev/null +++ b/deploy/olm-catalog/splunk/1.0.2/splunk.v1.0.2.clusterserviceversion.yaml @@ -0,0 +1,342 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [{ + "apiVersion": "enterprise.splunk.com/v2", + "kind": "IndexerCluster", + "metadata": { + "name": "example", + "finalizers": [ "enterprise.splunk.com/delete-pvc" ] + }, + "spec": { + "replicas": 1 + } + }, + { + "apiVersion": "enterprise.splunk.com/v2", + "kind": "LicenseMaster", + "metadata": { + "name": "example", + "finalizers": [ "enterprise.splunk.com/delete-pvc" ] + }, + "spec": {} + }, + { + "apiVersion": "enterprise.splunk.com/v2", + "kind": "SearchHeadCluster", + "metadata": { + "name": "example", + "finalizers": [ "enterprise.splunk.com/delete-pvc" ] + }, + "spec": { + "replicas": 1 + } + }, + { + "apiVersion": "enterprise.splunk.com/v2", + "kind": "Standalone", + "metadata": { + "name": "example", + "finalizers": [ "enterprise.splunk.com/delete-pvc" ] + }, + "spec": {} + }] + capabilities: Basic Install + name: splunk.v1.0.2 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: ClusterMaster is the Schema for the clustermasters API + kind: ClusterMaster + name: clustermasters.enterprise.splunk.com + version: v2 + resources: + - kind: StatefulSets + version: apps/v1 + - kind: Deployments + version: apps/v1 + - kind: Pods + version: v1 + - kind: Services + version: v1 + - kind: ConfigMaps + version: v1 + - kind: Secrets + version: v1 + displayName: IndexerCluster + - displayName: LicenseMaster + kind: ClusterMaster + name: clustermasters.enterprise.splunk.com + resources: + - kind: StatefulSets + version: apps/v1 + - kind: Deployments + version: apps/v1 + - kind: Pods + version: v1 + - kind: Services + version: v1 + - kind: ConfigMaps + version: v1 + - kind: Secrets + version: v1 + version: v1 + - displayName: SearchHeadCluster + kind: ClusterMaster + name: clustermasters.enterprise.splunk.com + resources: + - kind: StatefulSets + version: apps/v1 + - kind: Deployments + version: apps/v1 + - kind: Pods + version: v1 + - kind: Services + version: v1 + - kind: 
ConfigMaps + version: v1 + - kind: Secrets + version: v1 + version: v1beta1 + - kind: ClusterMaster + name: clustermasters.enterprise.splunk.com + resources: + - kind: StatefulSets + version: apps/v1 + - kind: Deployments + version: apps/v1 + - kind: Pods + version: v1 + - kind: Services + version: v1 + - kind: ConfigMaps + version: v1 + - kind: Secrets + version: v1 + version: v1alpha3 + - displayName: Standalone + kind: ClusterMaster + name: clustermasters.enterprise.splunk.com + resources: + - kind: StatefulSets + version: apps/v1 + - kind: Deployments + version: apps/v1 + - kind: Pods + version: v1 + - kind: Services + version: v1 + - kind: ConfigMaps + version: v1 + - kind: Secrets + version: v1 + version: v1alpha2 + - description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster + kind: IndexerCluster + name: indexerclusters.enterprise.splunk.com + version: v2 + - kind: IndexerCluster + name: indexerclusters.enterprise.splunk.com + version: v1 + - kind: IndexerCluster + name: indexerclusters.enterprise.splunk.com + version: v1beta1 + - kind: IndexerCluster + name: indexerclusters.enterprise.splunk.com + version: v1alpha3 + - kind: IndexerCluster + name: indexerclusters.enterprise.splunk.com + version: v1alpha2 + - description: LicenseMaster is the Schema for a Splunk Enterprise license master. + kind: LicenseMaster + name: licensemasters.enterprise.splunk.com + version: v2 + - kind: LicenseMaster + name: licensemasters.enterprise.splunk.com + version: v1 + - kind: LicenseMaster + name: licensemasters.enterprise.splunk.com + version: v1beta1 + - kind: LicenseMaster + name: licensemasters.enterprise.splunk.com + version: v1alpha3 + - kind: LicenseMaster + name: licensemasters.enterprise.splunk.com + version: v1alpha2 + - description: SearchHeadCluster is the Schema for a Splunk Enterprise search + head cluster + kind: SearchHeadCluster + name: searchheadclusters.enterprise.splunk.com + version: v2 + - kind: SearchHeadCluster + name: searchheadclusters.enterprise.splunk.com + version: v1 + - kind: SearchHeadCluster + name: searchheadclusters.enterprise.splunk.com + version: v1beta1 + - kind: SearchHeadCluster + name: searchheadclusters.enterprise.splunk.com + version: v1alpha3 + - kind: SearchHeadCluster + name: searchheadclusters.enterprise.splunk.com + version: v1alpha2 + - description: Standalone is the Schema for a Splunk Enterprise standalone instances. 
+ kind: Standalone + name: standalones.enterprise.splunk.com + version: v2 + - kind: Standalone + name: standalones.enterprise.splunk.com + version: v1 + - kind: Standalone + name: standalones.enterprise.splunk.com + version: v1beta1 + - kind: Standalone + name: standalones.enterprise.splunk.com + version: v1alpha3 + - kind: Standalone + name: standalones.enterprise.splunk.com + version: v1alpha2 + description: K8 Operator for Splunk Enterprise Deployments + displayName: Splunk Operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + clusterPermissions: + - rules: + - apiGroups: + - enterprise.splunk.com + resources: + - '*' + verbs: + - '*' + - apiGroups: + - apps + resources: + - deployments + - statefulsets + verbs: + - list + - get + - watch + serviceAccountName: splunk:operator:resource-manager + deployments: + - name: splunk-operator + spec: + replicas: 1 + selector: + matchLabels: + name: splunk-operator + strategy: {} + template: + metadata: + labels: + name: splunk-operator + spec: + containers: + - env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: splunk-operator + - name: RELATED_IMAGE_SPLUNK_ENTERPRISE + value: docker.io/splunk/splunk:8.2.1 + image: docker.io/splunk/splunk-operator:1.0.2 + imagePullPolicy: IfNotPresent + name: splunk-operator + resources: {} + serviceAccountName: splunk-operator + permissions: + - rules: + - apiGroups: + - "" + resources: + - services + - endpoints + - persistentvolumeclaims + - configmaps + - secrets + - pods + - pods/exec + - serviceaccounts + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - apiGroups: + - enterprise.splunk.com + resources: + - '*' + - indexerclusters + - clustermasters + - licensemasters + - searchheadclusters + - standalones + verbs: + - '*' + serviceAccountName: splunk-operator + strategy: deployment + installModes: + - supported: true + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - Splunk + - Enterprise + - Operator + links: + - name: Splunk + url: https://splunk.domain + maintainers: + - email: docker-maint@splunk.com + name: Splunk Docker Maintainers + maturity: alpha + provider: + name: Splunk Inc. + url: www.splunk.com + version: 1.0.2 diff --git a/deploy/operator.yaml b/deploy/operator.yaml index cf0af2db8..f95ae884b 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -30,4 +30,4 @@ spec: - name: OPERATOR_NAME value: "splunk-operator" - name: RELATED_IMAGE_SPLUNK_ENTERPRISE - value: "docker.io/splunk/splunk:8.2.0" + value: "docker.io/splunk/splunk:8.2.1" diff --git a/docs/AppFramework.md b/docs/AppFramework.md new file mode 100644 index 000000000..cae5e2075 --- /dev/null +++ b/docs/AppFramework.md @@ -0,0 +1,327 @@ +# App Framework Resource Guide + +The Splunk Operator provides support for Splunk App and Add-on deployment using the App Framework (Beta Version). 
The App Framework specification adds support for an S3-compatible app repository and authentication, and supports specific Splunk Enterprise cluster and standalone [custom resources](https://splunk.github.io/splunk-operator/CustomResources.html) (CR).
+
+### Prerequisites
+
+Utilizing the App Framework requires:
+
+* An Amazon S3 or S3-API-compliant remote object storage location. The App Framework requires read-only access to the path containing the apps.
+* The remote object storage credentials.
+* Splunk Apps and Add-ons in a .tgz or .spl archive format.
+* Connections to the remote object storage endpoint secured with TLS 1.2 or later.
+
+
+### How to use the App Framework on a Standalone CR
+
+In this example, you'll deploy a Standalone CR that defines a remote storage volume and the location of the app archives, and sets the installation location for the Splunk Enterprise Pod instance by using `scope`.
+
+1. Confirm your S3-based remote storage volume path and URL.
+2. Create a Kubernetes Secret Object with the storage credentials.
+   * Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`
+3. Create folders on the remote storage volume to use as App Source locations.
+   * An App Source is a folder on the remote storage volume containing a subset of Splunk Apps and Add-ons. In this example, we split the network and authentication Splunk Apps into different folders and named them `networkApps` and `authApps`.
+
+4. Copy your Splunk App or Add-on archive files to the App Source.
+   * In this example, the Splunk Apps are located at `bucket-app-framework-us-west-2/Standalone-us/networkAppsLoc/` and `bucket-app-framework-us-west-2/Standalone-us/authAppsLoc/`, and are both accessible through the endpoint `https://s3-us-west-2.amazonaws.com`.
+
+5. Update the Standalone CR specification and append the volume, App Source configuration, and scope.
+   * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will run the apps locally, set `scope: local`. The Standalone and License Master CRs always use a local scope.
+
+Example: Standalone.yaml
+
+```yaml
+apiVersion: enterprise.splunk.com/v2
+kind: Standalone
+metadata:
+  name: stdln
+  finalizers:
+  - enterprise.splunk.com/delete-pvc
+spec:
+  replicas: 1
+  appRepo:
+    appsRepoPollIntervalSeconds: 600
+    defaults:
+      volumeName: volume_app_repo
+      scope: local
+    appSources:
+      - name: networkApps
+        location: networkAppsLoc/
+      - name: authApps
+        location: authAppsLoc/
+    volumes:
+      - name: volume_app_repo
+        storageType: s3
+        provider: aws
+        path: bucket-app-framework-us-west-2/Standalone-us/
+        endpoint: https://s3-us-west-2.amazonaws.com
+        secretRef: s3-secret
+```
+
+6. Apply the Custom Resource specification: `kubectl apply -f Standalone.yaml`
+
+The App Framework detects the Splunk App archive files available in the App Source locations, and deploys the apps to the standalone instance for local use. The App Framework will also scan for changes to the App Source folders based on the polling interval, and deploy updated archives to the instance. A Pod reset is triggered to install the new or modified apps.
+
+Note: A similar approach can be used to install apps on a License Master using its own CR.
+
+For more information, see the [Description of App Framework Specification fields](#description-of-app-framework-specification-fields).
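+
+To monitor the deployment, you can inspect the CR status, which the App Framework updates as apps are fetched and installed. A minimal sketch, assuming the Standalone CR named `stdln` from the example above:
+
+```shell
+# Prints "true" while the App Framework is still deploying apps to the pod
+kubectl get standalone stdln -o jsonpath='{.status.appContext.isDeploymentInProgress}'
+```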
+
+### How to use the App Framework on Indexer Cluster
+
+This example describes the installation of apps on an Indexer Cluster as well as on the Cluster Master. This is achieved by deploying a ClusterMaster CR that defines a remote storage volume and the location of the app archives, and sets the installation scope to support both local and cluster app distribution.
+
+1. Confirm your S3-based remote storage volume path and URL.
+2. Create a Kubernetes Secret Object with the storage credentials.
+   * Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`
+3. Create folders on the remote storage volume to use as App Source locations.
+   * An App Source is a folder on the remote storage volume containing a subset of Splunk Apps and Add-ons. In this example, we have Splunk apps that are installed and run locally on the cluster master, and apps that will be distributed to all cluster peers by the cluster master.
+   * The apps are split across three folders named `networkApps`, `clusterBase`, and `adminApps`. The apps placed into `networkApps` and `clusterBase` are distributed to the cluster peers, but the apps in `adminApps` are for local use on the cluster master instance only.
+
+4. Copy your Splunk App or Add-on archive files to the App Source.
+   * In this example, the Splunk Apps for the cluster peers are located at `bucket-app-framework-us-west-2/idxcAndCmApps/networkAppsLoc/` and `bucket-app-framework-us-west-2/idxcAndCmApps/clusterBaseLoc/`, and the apps for the cluster master are located at `bucket-app-framework-us-west-2/idxcAndCmApps/adminAppsLoc/`. They are all accessible through the endpoint `https://s3-us-west-2.amazonaws.com`.
+
+5. Update the ClusterMaster CR specification and append the volume, App Source configuration, and scope.
+   * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will deploy the apps to the cluster peers, set `scope: cluster`. The ClusterMaster and SearchHeadCluster CRs support both cluster and local scopes.
+   * In this example, the cluster master will run some apps locally and deploy other apps to the cluster peers. The apps in the App Source folder `adminApps` are installed on the cluster master, and use a local scope. The apps in the App Source folders `networkApps` and `clusterBase` will be deployed from the cluster master to the peers, and use a cluster scope.
+
+Example: ClusterMaster.yaml
+
+```yaml
+apiVersion: enterprise.splunk.com/v2
+kind: ClusterMaster
+metadata:
+  name: cm
+  finalizers:
+  - enterprise.splunk.com/delete-pvc
+spec:
+  replicas: 1
+  appRepo:
+    appsRepoPollIntervalSeconds: 900
+    defaults:
+      volumeName: volume_app_repo_us
+      scope: cluster
+    appSources:
+      - name: networkApps
+        location: networkAppsLoc/
+      - name: clusterBase
+        location: clusterBaseLoc/
+      - name: adminApps
+        location: adminAppsLoc/
+        scope: local
+    volumes:
+      - name: volume_app_repo_us
+        storageType: s3
+        provider: aws
+        path: bucket-app-framework-us-west-2/idxcAndCmApps/
+        endpoint: https://s3-us-west-2.amazonaws.com
+        secretRef: s3-secret
+```
+
+6. Apply the Custom Resource specification: `kubectl apply -f ClusterMaster.yaml`
+
+The App Framework detects the Splunk App archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the cluster master instance for local use.
A Pod reset is triggered on the cluster master to install any new or modified apps. The App Framework will also scan for changes to the App Source folders based on the polling interval, and deploy updated archives to the instance.
+
+The apps in the `networkApps` and `clusterBase` folders are deployed to the cluster master for use on the cluster. The cluster master is responsible for deploying those apps to the cluster peers. The Splunk cluster peer restarts are triggered by the contents of the Splunk apps deployed, and are not initiated by the App Framework.
+
+For more information, see the [Description of App Framework Specification fields](#description-of-app-framework-specification-fields).
+
+### How to use the App Framework on Search Head Cluster
+
+This example describes the installation of apps on a Search Head Cluster as well as on the Deployer. This is achieved by deploying a SearchHeadCluster CR that defines a remote storage volume and the location of the app archives, and sets the installation scope to support both local and cluster app distribution.
+
+1. Confirm your S3-based remote storage volume path and URL.
+2. Create a Kubernetes Secret Object with the storage credentials.
+   * Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY`
+3. Create folders on the remote storage volume to use as App Source locations.
+   * An App Source is a folder on the remote storage volume containing a subset of Splunk Apps and Add-ons. In this example, we have Splunk apps that are installed and run locally on the Deployer, and apps that will be distributed to all cluster search heads by the Deployer.
+   * The apps are split across three folders named `searchApps`, `machineLearningApps`, and `adminApps`. The apps placed into `searchApps` and `machineLearningApps` are distributed to the search heads, but the apps in `adminApps` are for local use on the Deployer instance only.
+
+4. Copy your Splunk App or Add-on archive files to the App Source.
+   * In this example, the Splunk Apps for the search heads are located at `bucket-app-framework-us-west-2/shcLoc-us/searchAppsLoc/` and `bucket-app-framework-us-west-2/shcLoc-us/machineLearningAppsLoc/`, and the apps for the Deployer are located at `bucket-app-framework-us-west-2/shcLoc-us/adminAppsLoc/`. They are all accessible through the endpoint `https://s3-us-west-2.amazonaws.com`.
+
+5. Update the SearchHeadCluster CR specification and append the volume, App Source configuration, and scope.
+   * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will deploy the apps to the search heads, set `scope: cluster`. The ClusterMaster and SearchHeadCluster CRs support both cluster and local scopes.
+   * In this example, the Deployer will run some apps locally and deploy other apps to the search heads. The apps in the App Source folder `adminApps` are installed on the Deployer, and use a local scope. The apps in the App Source folders `searchApps` and `machineLearningApps` will be deployed from the Deployer to the search heads, and use a cluster scope.
+
+Example: SearchHeadCluster.yaml
+
+```yaml
+apiVersion: enterprise.splunk.com/v2
+kind: SearchHeadCluster
+metadata:
+  name: shc
+  finalizers:
+  - enterprise.splunk.com/delete-pvc
+spec:
+  replicas: 1
+  appRepo:
+    appsRepoPollIntervalSeconds: 900
+    defaults:
+      volumeName: volume_app_repo_us
+      scope: cluster
+    appSources:
+      - name: searchApps
+        location: searchAppsLoc/
+      - name: machineLearningApps
+        location: machineLearningAppsLoc/
+      - name: adminApps
+        location: adminAppsLoc/
+        scope: local
+    volumes:
+      - name: volume_app_repo_us
+        storageType: s3
+        provider: aws
+        path: bucket-app-framework-us-west-2/shcLoc-us/
+        endpoint: https://s3-us-west-2.amazonaws.com
+        secretRef: s3-secret
+```
+
+6. Apply the Custom Resource specification: `kubectl apply -f SearchHeadCluster.yaml`
+
+The App Framework detects the Splunk App archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the Deployer instance for local use. A Pod reset is triggered on the Deployer to install any new or modified apps. The App Framework will also scan for changes to the App Source folders based on the polling interval, and deploy updated archives to the instance.
+
+The apps in the `searchApps` and `machineLearningApps` folders are deployed to the Deployer for use on the search head cluster. The Deployer is responsible for deploying those apps to the search heads. The Splunk search head restarts are triggered by the contents of the Splunk apps deployed, and are not initiated by the App Framework.
+
+For more information, see the [Description of App Framework Specification fields](#description-of-app-framework-specification-fields).
+
+
+## Description of App Framework Specification fields
+
+App Framework configuration is supported on the following Custom Resources: Standalone, ClusterMaster, SearchHeadCluster, and LicenseMaster. Configuring the App Framework involves the following steps:
+
+* Remote Source of Apps: Define the remote location, including the bucket(s) and the path within each bucket.
+* Destination of Apps: Define where the Apps need to be installed (in other words, which Custom Resources need to be configured).
+* Scope of Apps: Define whether the Apps need to be installed locally (such as a Standalone) or cluster-wide (such as an Indexer cluster).
+
+Here is a typical App Framework configuration in a Custom Resource definition:
+
+```yaml
+  appRepo:
+    description: Splunk Enterprise App repository. Specifies remote App
+      location and scope for Splunk App management
+    properties:
+      appSources:
+        description: List of App sources on remote storage
+        items:
+          description: AppSourceSpec defines list of App package (*.spl,
+            *.tgz) locations on remote volumes
+          properties:
+            location:
+              description: Location relative to the volume path
+              type: string
+            name:
+              description: Logical name for the set of apps placed in
+                this location. Logical name must be unique to the appRepo
+              type: string
+            scope:
+ Scope determines whether the App(s) is/are installed locally + or cluster-wide' + type: string + volumeName: + description: Remote Storage Volume name + type: string + type: object + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. For e.g. + aws, azure, minio, etc. Currently we are only supporting + aws. TODO: Support minio as well.' + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: Remote Storage type. + type: string + type: object + type: array + type: object +``` + +### appRepo + +`appRepo` is the top-level stanza of the App Framework specification, and contains all of the configuration required for the App Framework to operate. + +### volumes + +`volumes` configures the remote storage volumes. The App Framework expects the apps to be installed on the various Splunk deployments to be hosted in one or more remote storage volumes. + +* `name` uniquely identifies the remote storage volume name within a CR. It is used locally by the Operator to identify the volume +* `storageType` describes the type of remote storage. Currently, `s3` is the only supported type +* `provider` describes the remote storage provider. Currently, `aws` and `minio` are the supported providers +* `endpoint` configures the URI/URL of the remote storage endpoint that hosts the apps +* `secretRef` refers to the K8s secret object containing the remote storage access credentials +* `path` describes the path (including the bucket) of one or more App Sources on the remote store + +### appSources + +`appSources` configures the name and scope of each App Source, along with its remote storage volume and location. + +* `name` uniquely identifies the App Source configuration within a CR. It is used locally by the Operator to identify the App Source +* `scope` defines the scope of the App to be installed.
+ * If the scope is `local`, the apps will be installed locally on the pod referred to by the CR + * If the scope is `cluster`, the apps will be installed across the cluster referred to by the CR + * The cluster scope is only supported on CRs that manage cluster-wide app deployment + + | CRD Type | Scope support | App Framework support | + | :---------------- | :------------- | :-------------------- | + | ClusterMaster | cluster, local | Yes | + | SearchHeadCluster | cluster, local | Yes | + | Standalone | local | Yes | + | LicenseMaster | local | Yes | + | IndexerCluster | N/A | No | + +* `volumeName` refers to the remote storage volume name configured under the `volumes` stanza (see previous section) +* `location` points to the specific App Source folder under the `path` within the volume that contains the apps to be installed + +### appsRepoPollIntervalSeconds + +`appsRepoPollIntervalSeconds` configures the polling interval (in seconds) used to detect the addition or modification of apps on the Remote Storage + +## Impact of livenessInitialDelaySeconds and readinessInitialDelaySeconds + +* Splunk Operator CRDs support the configuration of [initialDelaySeconds](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) for both Liveness (livenessInitialDelaySeconds) and Readiness (readinessInitialDelaySeconds) probes +* Default values are 300 seconds for livenessInitialDelaySeconds and 10 seconds for readinessInitialDelaySeconds +* When the App Framework is configured as part of a CR, the Operator can also override the default or configured values for both probes with internally calculated higher values, depending on the number of Apps being configured. This ensures that optimal values are used to allow for successful installation or update of Apps, especially in large-scale deployments. + +## App Framework Limitations + +The App Framework does not review, preview, analyze, or enable Splunk Apps and Add-ons. The administrator is responsible for previewing the app or add-on contents, verifying that the app is enabled, and that the app is supported with the version of Splunk Enterprise used in the containers. For App packaging specifications see [Package apps for Splunk Cloud or Splunk Enterprise](https://dev.splunk.com/enterprise/docs/releaseapps/packageapps/) in the Splunk Enterprise Developer documentation. The app archive files must end with .spl or .tgz; all other files are ignored. + +1. The App Framework does not support removing an app or add-on once it has been deployed. To disable an app, update the archive contents located in the App Source, and set the app.conf state to disabled (see the example stanza after this list). + +2. The App Framework tracks the app installation state per CR. Whenever you scale up a Standalone CR, all the existing pods will recycle and all the apps in the App Sources will be re-installed. This is done so that the new replica(s) can install all the apps, and not just the apps that were changed recently. + +3. When a change in the App Repo is detected by the App Framework, a pod reset is initiated to install the new or modified applications. For the ClusterMaster and SearchHeadCluster CRs, a pod reset is applied to the cluster master and Deployer instances only. A cluster peer restart might be triggered by the contents of the Splunk apps deployed, but is not initiated by the App Framework.
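+ +As a sketch of the disable step in limitation 1, the repackaged archive uploaded back to the App Source would carry an app.conf marking the app disabled. The file path and stanza below are standard Splunk app configuration; the app and archive names are whatever your App Source already uses: + +``` +# default/app.conf inside the repackaged app archive +[install] +state = disabled +```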
\ No newline at end of file diff --git a/docs/ChangeLog.md b/docs/ChangeLog.md index ee0a9dfe6..69ba326a2 100644 --- a/docs/ChangeLog.md +++ b/docs/ChangeLog.md @@ -3,7 +3,7 @@ ## 1.0.1 (2021-06-09) * This is the 1.0.1 release. The Splunk Operator for Kubernetes is a supported platform for deploying Splunk Enterprise with the prerequisites and constraints laid out [here](https://github.com/splunk/splunk-operator/blob/develop/docs/README.md#prerequisites-for-the-splunk-operator) -* This release depends upon changes made concurrently in the Splunk Enterprise container images. You should use the splunk/splunk:8.2.0 image with it +* This release depends upon changes made concurrently in the Splunk Enterprise container images. You should use the splunk/splunk:8.2.1 image with it * Upgraded operator-sdk version from v0.15.1 to v0.18.2 diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 9c7a819ea..a7097d3ff 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -33,7 +33,7 @@ you would like the resource to reside within: If you do not provide a `namespace`, your current context will be used. ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: s1 @@ -51,12 +51,19 @@ associated with the instance when you delete it. ## Common Spec Parameters for All Resources ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: example spec: imagePullPolicy: Always + livenessInitialDelaySeconds: 400 + readinessInitialDelaySeconds: 390 + extraEnv: + - name: ADDITIONAL_ENV_VAR_1 + value: "test_value_1" + - name: ADDITIONAL_ENV_VAR_2 + value: "test_value_2" resources: requests: memory: "512Mi" @@ -74,6 +81,9 @@ configuration parameters: | --------------------- | ---------- | ---------------------------------------------------------------------------------------------------------- | | image | string | Container image to use for pod instances (overrides `RELATED_IMAGE_SPLUNK_ENTERPRISE` environment variable) | | imagePullPolicy | string | Sets pull policy for all images (either "Always" or the default: "IfNotPresent") | +| livenessInitialDelaySeconds | number | Sets the initialDelaySeconds for Liveness probe (default: 300) | +| readinessInitialDelaySeconds | number | Sets the initialDelaySeconds for Readiness probe (default: 10) | +| extraEnv | [EnvVar](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvar-v1-core) | Sets the extra environment variables to be passed to the Splunk instance containers. WARNING: Setting environment variables used by Splunk or Ansible will affect Splunk installation and operation | | schedulerName | string | Name of [Scheduler](https://kubernetes.io/docs/concepts/scheduling/kube-scheduler/) to use for pod placement (defaults to "default-scheduler") | | affinity | [Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core) | [Kubernetes Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity) rules that control how pods are assigned to particular nodes | | resources | [ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#resourcerequirements-v1-core) | The settings for allocating [compute resource requirements](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) to use for each pod instance.
The default settings should be considered for demo/test purposes. Please see [Hardware Resource Requirements](https://github.com/splunk/splunk-operator/blob/develop/docs/README.md#hardware-resources-requirements) for production values.| @@ -82,7 +92,7 @@ configuration parameters: ## Common Spec Parameters for Splunk Enterprise Resources ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: example @@ -123,7 +133,7 @@ Enterprise resources, including: `Standalone`, `LicenseMaster`, ## LicenseMaster Resource Spec Parameters ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: LicenseMaster metadata: name: example @@ -143,7 +153,7 @@ The `LicenseMaster` resource does not provide any additional configuration param ## Standalone Resource Spec Parameters ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: standalone @@ -166,7 +176,7 @@ the `Standalone` resource provides the following `Spec` configuration parameters ## SearchHeadCluster Resource Spec Parameters ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: SearchHeadCluster metadata: name: example @@ -185,7 +195,7 @@ the `SearchHeadCluster` resource provides the following `Spec` configuration par ## ClusterMaster Resource Spec Parameters ClusterMaster resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: ClusterMaster metadata: name: example-cm @@ -214,7 +224,7 @@ spec: ## IndexerCluster Resource Spec Parameters ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: IndexerCluster metadata: name: example @@ -246,7 +256,7 @@ The minimum resource requirements for a Standalone Splunk Enterprise instance ar Example: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: example @@ -267,7 +277,7 @@ Set the ```requests``` value for CPU and memory lower than the ```limits``` valu Example: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: example diff --git a/docs/Examples.md b/docs/Examples.md index 1f133bd28..1d80f276f 100644 --- a/docs/Examples.md +++ b/docs/Examples.md @@ -37,7 +37,7 @@ indexers. A `Standalone` resource can be used to create a single instance that can perform either, or both, of these roles. ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: single @@ -56,7 +56,7 @@ The Splunk Operator makes creation of an indexer cluster as easy as creating a ` #### Cluster Master ```yaml cat <`: The [Splunk Enterprise image](https://github.com/splunk/docker-splunk) (8.2.0 or later) +* `splunk/splunk-operator`: The Splunk Operator image built by this repository or the [official release](https://hub.docker.com/r/splunk/splunk-operator) (1.0.2 or later) +* `splunk/splunk:<version>`: The [Splunk Enterprise image](https://github.com/splunk/docker-splunk) (8.2.1 or later) All of these images are publicly available, and published on [Docker Hub](https://hub.docker.com/).
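+ +For example, pre-pulling these images onto a node might look like the following; the tags shown are illustrative, and any versions meeting the minimums above will work: + +```bash +docker pull splunk/splunk-operator:1.0.2 +docker pull splunk/splunk:8.2.1 +```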
diff --git a/docs/Ingress.md b/docs/Ingress.md index 265b87a0f..59926c586 100644 --- a/docs/Ingress.md +++ b/docs/Ingress.md @@ -309,7 +309,7 @@ When using TLS for Ingress, we recommend you add an additional port for secure c This example shows how to add port 9998 for a standalone instance: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: standalone diff --git a/docs/MultisiteExamples.md b/docs/MultisiteExamples.md index 5867c3494..2034d6648 100644 --- a/docs/MultisiteExamples.md +++ b/docs/MultisiteExamples.md @@ -49,7 +49,7 @@ Note: the image version is defined in these resources as this allows to control ```yaml cat <`: The [Splunk Enterprise image](https://github.com/splunk/docker-splunk) (8.2.0 or later) +* `splunk/splunk-operator`: The Splunk Operator image built by this repository or the [official release](https://hub.docker.com/r/splunk/splunk-operator) (1.0.2 or later) +* `splunk/splunk:<version>`: The [Splunk Enterprise image](https://github.com/splunk/docker-splunk) (8.2.1 or later) All of the Splunk Enterprise images are publicly available on [Docker Hub](https://hub.docker.com/). If your cluster does not have access to pull from Docker Hub, see the [Required Images Documentation](Images.md) page. @@ -135,7 +135,7 @@ The `Standalone` custom resource is used to create a single instance deployment ```yaml cat < --from-literal=s3_access_key= --from-literal=s3_secret_key=` + +Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=iRo9guRpeT2EWn18QvpdcqLBcZmW1SDg== --from-literal=s3_secret_key=ZXvNDSfRo64UelY7Y4JZTO1iGSZt5xaQ2` ## Creating a SmartStore-enabled Standalone instance @@ -28,32 +30,35 @@ Here is an example command to encode and load your remote storage volume secret Example. Standalone.yaml: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: - name: + name: s1 finalizers: - enterprise.splunk.com/delete-pvc spec: - replicas: 1 smartstore: - volumes: - - name: - path: - endpoint: https://s3-.amazonaws.com - secretRef: + defaults: + volumeName: s2s3_vol indexes: - - name: + - name: networkmonitor + volumeName: s2s3_vol remotePath: $_index_name - volumeName: - - name: - remotePath: $_index_name - volumeName: - - name: - remotePath: $_index_name - volumeName: + - name: salesdata + - name: oslogs + volumes: + - name: s2s3_vol + path: indexdata-s2-bucket/standaloneNodes/s1data/ + endpoint: https://s3-us-west-2.amazonaws.com + secretRef: s3-secret ``` +1. In the above example, `indexdata-s2-bucket` is the bucket name on remote storage, and `standaloneNodes/s1data` is the relative path on that bucket in which the index data is stored. +2. There are three indexes defined in the above config example, i.e., `networkmonitor`, `salesdata`, and `oslogs`. +3. The `defaults:` section is configured with the s3 `volumeName` parameter. An index can override this with a specific volumeName, as shown for the `networkmonitor` index. (Note: Unless multiple S3 volumes are used, specifying the `volumeName` in the `defaults:` section keeps the configuration simple, with no need to repeat it across all the indexes.) +4. If the remotePath is not explicitly specified, a default value of `$_index_name` is assumed. For the semantics of `$_index_name`, please see [indexes.conf](https://docs.splunk.com/Documentation/Splunk/latest/Admin/Indexesconf) in Splunk docs. + + The SmartStore parameters will be placed into the required .conf files in an app. The app is named `splunk-operator`.
In the case of a standalone deployment, the app is located at `/opt/splunk/etc/apps/` Note: Custom apps with higher precedence can potentially overwrite the index and volume configuration in the splunk-operator app. Hence, care should be taken to avoid conflicting SmartStore configuration in custom apps. See [Configuration file precedence order](https://docs.splunk.com/Documentation/Splunk/latest/Admin/Wheretofindtheconfigurationfiles#How_Splunk_determines_precedence_order) @@ -72,7 +77,7 @@ Note: Custom apps with higher precedence can potentially overwrite the index and Example. Clustermaster.yaml: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: ClusterMaster metadata: name: @@ -108,7 +113,7 @@ Note: Custom apps with higher precedence can potentially overwrite the index and There are additional SmartStore settings available for tuning and storage management. The settings are equivalent to the SmartStore settings defined in indexes.conf and server.conf for Splunk Enterprise. The SmartStore resource applies to the `Standalone` and `ClusterMaster` Custom Resources, and adds the following `Spec` configuration parameters: -``` +```yaml smartstore: description: Splunk Smartstore configuration. Refer to indexes.conf.spec and @@ -255,4 +260,4 @@ path = remote.s3.encryption = sse-s3 ``` 2. Apply the CR with the necessary & supported Smartstore and Index related configs -3. Install the App created using the [currently supported methods](https://splunk.github.io/splunk-operator/Examples.html#installing-splunk-apps) (*Note: This can be combined with the previous step*) \ No newline at end of file +3. Install the App created using the [currently supported methods](https://splunk.github.io/splunk-operator/Examples.html#installing-splunk-apps) (*Note: This can be combined with the previous step*) diff --git a/docs/StorageClass.md b/docs/StorageClass.md index c8c4e22e7..003394391 100644 --- a/docs/StorageClass.md +++ b/docs/StorageClass.md @@ -14,7 +14,7 @@ You can customize the storage capacity and storage class name used by the `/opt/ For example: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: example @@ -59,7 +59,7 @@ For testing and demonstration of Splunk Enterprise instances, you have the optio For example: ```yaml -apiVersion: enterprise.splunk.com/v1 +apiVersion: enterprise.splunk.com/v2 kind: Standalone metadata: name: example diff --git a/go.mod b/go.mod index 0ffa8edd9..169610a5b 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect github.com/aws/aws-sdk-go v1.37.24 github.com/go-logr/logr v0.1.0 + github.com/minio/minio-go/v7 v7.0.10 github.com/onsi/ginkgo v1.12.0 github.com/onsi/gomega v1.9.0 github.com/operator-framework/operator-sdk v0.18.2 @@ -43,8 +44,12 @@ replace ( k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.17 ) -replace github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 // Required by Helm +replace ( + github.com/docker/docker => github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 // Required by Helm + + github.com/openshift/api => github.com/openshift/api v0.0.0-20190924102528-32369d4db2ad // Required until https://github.com/operator-framework/operator-lifecycle-manager/pull/1241 is resolved -replace github.com/openshift/api => github.com/openshift/api v0.0.0-20190924102528-32369d4db2ad // Required until 
https://github.com/operator-framework/operator-lifecycle-manager/pull/1241 is resolved + golang.org/x/net => golang.org/x/net v0.0.0-20210614182718-04defd469f4e // CVE-2021-33194 and CVE-2021-31525 -replace golang.org/x/text => golang.org/x/text v0.3.5 // Fix CVE-2020-14040 and CVE-2020-28852 + golang.org/x/text => golang.org/x/text v0.3.5 // Fix CVE-2020-14040 and CVE-2020-28852 +) diff --git a/go.sum b/go.sum index 0dc9f28c5..628f9d750 100644 --- a/go.sum +++ b/go.sum @@ -502,6 +502,7 @@ github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEo github.com/gophercloud/gophercloud v0.6.0 h1:Xb2lcqZtml1XjgYZxbeayEemq7ASbeTp09m36gQFpEU= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU= github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= @@ -604,9 +605,12 @@ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jsonnet-bundler/jsonnet-bundler v0.3.1/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -621,6 +625,9 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= +github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -701,12 +708,18 @@ github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= +github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= +github.com/minio/minio-go/v7 v7.0.10 h1:1oUKe4EOPUEhw2qnPQaPsJ0lmVTYLFu03SiItauXs94= +github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= +github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= @@ -883,6 +896,8 @@ github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -921,8 +936,10 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= 
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= @@ -1070,8 +1087,7 @@ golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904 h1:bXoxMPcSLOq08zI3/c5dEBT6lE4eh+jOh886GHrn6V8= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1101,49 +1117,8 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1211,8 +1186,11 @@ golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1354,6 +1332,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= +gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= diff --git a/pkg/apis/addtoscheme_enterprise_v2.go b/pkg/apis/addtoscheme_enterprise_v2.go new file mode 100644 index 000000000..a10585439 --- /dev/null +++ b/pkg/apis/addtoscheme_enterprise_v2.go @@ -0,0 +1,24 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package apis + +import ( + v2 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" +) + +func init() { + // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back + AddToSchemes = append(AddToSchemes, v2.SchemeBuilder.AddToScheme) +} diff --git a/pkg/apis/enterprise/v1/zz_generated.deepcopy.go b/pkg/apis/enterprise/v1/zz_generated.deepcopy.go index cbbda3805..9375ac27e 100644 --- a/pkg/apis/enterprise/v1/zz_generated.deepcopy.go +++ b/pkg/apis/enterprise/v1/zz_generated.deepcopy.go @@ -3,763 +3,3 @@ // Code generated by operator-sdk. DO NOT EDIT. 
package v1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BundlePushInfo) DeepCopyInto(out *BundlePushInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundlePushInfo. -func (in *BundlePushInfo) DeepCopy() *BundlePushInfo { - if in == nil { - return nil - } - out := new(BundlePushInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { - *out = *in - out.IndexAndCacheManagerCommonSpec = in.IndexAndCacheManagerCommonSpec - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheManagerSpec. -func (in *CacheManagerSpec) DeepCopy() *CacheManagerSpec { - if in == nil { - return nil - } - out := new(CacheManagerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterMaster) DeepCopyInto(out *ClusterMaster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaster. -func (in *ClusterMaster) DeepCopy() *ClusterMaster { - if in == nil { - return nil - } - out := new(ClusterMaster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterMaster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterMasterList) DeepCopyInto(out *ClusterMasterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterMaster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterList. -func (in *ClusterMasterList) DeepCopy() *ClusterMasterList { - if in == nil { - return nil - } - out := new(ClusterMasterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterMasterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterMasterSpec) DeepCopyInto(out *ClusterMasterSpec) { - *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - in.SmartStore.DeepCopyInto(&out.SmartStore) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterSpec. 
-func (in *ClusterMasterSpec) DeepCopy() *ClusterMasterSpec { - if in == nil { - return nil - } - out := new(ClusterMasterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterMasterStatus) DeepCopyInto(out *ClusterMasterStatus) { - *out = *in - in.SmartStore.DeepCopyInto(&out.SmartStore) - out.BundlePushTracker = in.BundlePushTracker - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterStatus. -func (in *ClusterMasterStatus) DeepCopy() *ClusterMasterStatus { - if in == nil { - return nil - } - out := new(ClusterMasterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { - *out = *in - in.Spec.DeepCopyInto(&out.Spec) - out.EtcVolumeStorageConfig = in.EtcVolumeStorageConfig - out.VarVolumeStorageConfig = in.VarVolumeStorageConfig - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]corev1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - out.LicenseMasterRef = in.LicenseMasterRef - out.ClusterMasterRef = in.ClusterMasterRef - if in.ExtraEnv != nil { - in, out := &in.ExtraEnv, &out.ExtraEnv - *out = make([]corev1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSplunkSpec. -func (in *CommonSplunkSpec) DeepCopy() *CommonSplunkSpec { - if in == nil { - return nil - } - out := new(CommonSplunkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexAndCacheManagerCommonSpec) DeepCopyInto(out *IndexAndCacheManagerCommonSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexAndCacheManagerCommonSpec. -func (in *IndexAndCacheManagerCommonSpec) DeepCopy() *IndexAndCacheManagerCommonSpec { - if in == nil { - return nil - } - out := new(IndexAndCacheManagerCommonSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexAndGlobalCommonSpec) DeepCopyInto(out *IndexAndGlobalCommonSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexAndGlobalCommonSpec. -func (in *IndexAndGlobalCommonSpec) DeepCopy() *IndexAndGlobalCommonSpec { - if in == nil { - return nil - } - out := new(IndexAndGlobalCommonSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexConfDefaultsSpec) DeepCopyInto(out *IndexConfDefaultsSpec) { - *out = *in - out.IndexAndGlobalCommonSpec = in.IndexAndGlobalCommonSpec - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexConfDefaultsSpec. 
-func (in *IndexConfDefaultsSpec) DeepCopy() *IndexConfDefaultsSpec { - if in == nil { - return nil - } - out := new(IndexConfDefaultsSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexSpec) DeepCopyInto(out *IndexSpec) { - *out = *in - out.IndexAndCacheManagerCommonSpec = in.IndexAndCacheManagerCommonSpec - out.IndexAndGlobalCommonSpec = in.IndexAndGlobalCommonSpec - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexSpec. -func (in *IndexSpec) DeepCopy() *IndexSpec { - if in == nil { - return nil - } - out := new(IndexSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerCluster) DeepCopyInto(out *IndexerCluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerCluster. -func (in *IndexerCluster) DeepCopy() *IndexerCluster { - if in == nil { - return nil - } - out := new(IndexerCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IndexerCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerClusterList) DeepCopyInto(out *IndexerClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IndexerCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterList. -func (in *IndexerClusterList) DeepCopy() *IndexerClusterList { - if in == nil { - return nil - } - out := new(IndexerClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IndexerClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerClusterMemberStatus) DeepCopyInto(out *IndexerClusterMemberStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterMemberStatus. -func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { - if in == nil { - return nil - } - out := new(IndexerClusterMemberStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { - *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. 
-func (in *IndexerClusterSpec) DeepCopy() *IndexerClusterSpec { - if in == nil { - return nil - } - out := new(IndexerClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { - *out = *in - if in.IndexerSecretChanged != nil { - in, out := &in.IndexerSecretChanged, &out.IndexerSecretChanged - *out = make([]bool, len(*in)) - copy(*out, *in) - } - if in.IdxcPasswordChangedSecrets != nil { - in, out := &in.IdxcPasswordChangedSecrets, &out.IdxcPasswordChangedSecrets - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Peers != nil { - in, out := &in.Peers, &out.Peers - *out = make([]IndexerClusterMemberStatus, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. -func (in *IndexerClusterStatus) DeepCopy() *IndexerClusterStatus { - if in == nil { - return nil - } - out := new(IndexerClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseMaster) DeepCopyInto(out *LicenseMaster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMaster. -func (in *LicenseMaster) DeepCopy() *LicenseMaster { - if in == nil { - return nil - } - out := new(LicenseMaster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LicenseMaster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseMasterList) DeepCopyInto(out *LicenseMasterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LicenseMaster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMasterList. -func (in *LicenseMasterList) DeepCopy() *LicenseMasterList { - if in == nil { - return nil - } - out := new(LicenseMasterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LicenseMasterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseMasterSpec) DeepCopyInto(out *LicenseMasterSpec) { - *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMasterSpec. 
-func (in *LicenseMasterSpec) DeepCopy() *LicenseMasterSpec { - if in == nil { - return nil - } - out := new(LicenseMasterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseMasterStatus) DeepCopyInto(out *LicenseMasterStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMasterStatus. -func (in *LicenseMasterStatus) DeepCopy() *LicenseMasterStatus { - if in == nil { - return nil - } - out := new(LicenseMasterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadCluster) DeepCopyInto(out *SearchHeadCluster) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadCluster. -func (in *SearchHeadCluster) DeepCopy() *SearchHeadCluster { - if in == nil { - return nil - } - out := new(SearchHeadCluster) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SearchHeadCluster) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadClusterList) DeepCopyInto(out *SearchHeadClusterList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]SearchHeadCluster, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterList. -func (in *SearchHeadClusterList) DeepCopy() *SearchHeadClusterList { - if in == nil { - return nil - } - out := new(SearchHeadClusterList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SearchHeadClusterList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadClusterMemberStatus) DeepCopyInto(out *SearchHeadClusterMemberStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterMemberStatus. -func (in *SearchHeadClusterMemberStatus) DeepCopy() *SearchHeadClusterMemberStatus { - if in == nil { - return nil - } - out := new(SearchHeadClusterMemberStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { - *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. 
-func (in *SearchHeadClusterSpec) DeepCopy() *SearchHeadClusterSpec { - if in == nil { - return nil - } - out := new(SearchHeadClusterSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SearchHeadClusterStatus) DeepCopyInto(out *SearchHeadClusterStatus) { - *out = *in - if in.ShcSecretChanged != nil { - in, out := &in.ShcSecretChanged, &out.ShcSecretChanged - *out = make([]bool, len(*in)) - copy(*out, *in) - } - if in.AdminSecretChanged != nil { - in, out := &in.AdminSecretChanged, &out.AdminSecretChanged - *out = make([]bool, len(*in)) - copy(*out, *in) - } - if in.AdminPasswordChangedSecrets != nil { - in, out := &in.AdminPasswordChangedSecrets, &out.AdminPasswordChangedSecrets - *out = make(map[string]bool, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Members != nil { - in, out := &in.Members, &out.Members - *out = make([]SearchHeadClusterMemberStatus, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterStatus. -func (in *SearchHeadClusterStatus) DeepCopy() *SearchHeadClusterStatus { - if in == nil { - return nil - } - out := new(SearchHeadClusterStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SmartStoreSpec) DeepCopyInto(out *SmartStoreSpec) { - *out = *in - if in.VolList != nil { - in, out := &in.VolList, &out.VolList - *out = make([]VolumeSpec, len(*in)) - copy(*out, *in) - } - if in.IndexList != nil { - in, out := &in.IndexList, &out.IndexList - *out = make([]IndexSpec, len(*in)) - copy(*out, *in) - } - out.Defaults = in.Defaults - out.CacheManagerConf = in.CacheManagerConf - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartStoreSpec. -func (in *SmartStoreSpec) DeepCopy() *SmartStoreSpec { - if in == nil { - return nil - } - out := new(SmartStoreSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Standalone) DeepCopyInto(out *Standalone) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Standalone. -func (in *Standalone) DeepCopy() *Standalone { - if in == nil { - return nil - } - out := new(Standalone) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Standalone) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StandaloneList) DeepCopyInto(out *StandaloneList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Standalone, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneList. 
-func (in *StandaloneList) DeepCopy() *StandaloneList { - if in == nil { - return nil - } - out := new(StandaloneList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StandaloneList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StandaloneSpec) DeepCopyInto(out *StandaloneSpec) { - *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - in.SmartStore.DeepCopyInto(&out.SmartStore) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneSpec. -func (in *StandaloneSpec) DeepCopy() *StandaloneSpec { - if in == nil { - return nil - } - out := new(StandaloneSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StandaloneStatus) DeepCopyInto(out *StandaloneStatus) { - *out = *in - in.SmartStore.DeepCopyInto(&out.SmartStore) - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneStatus. -func (in *StandaloneStatus) DeepCopy() *StandaloneStatus { - if in == nil { - return nil - } - out := new(StandaloneStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageClassSpec) DeepCopyInto(out *StorageClassSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassSpec. -func (in *StorageClassSpec) DeepCopy() *StorageClassSpec { - if in == nil { - return nil - } - out := new(StorageClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec. -func (in *VolumeSpec) DeepCopy() *VolumeSpec { - if in == nil { - return nil - } - out := new(VolumeSpec) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/enterprise/v1/clustermaster_types.go b/pkg/apis/enterprise/v2/clustermaster_types.go similarity index 92% rename from pkg/apis/enterprise/v1/clustermaster_types.go rename to pkg/apis/enterprise/v2/clustermaster_types.go index 5734f8d3b..345d46224 100644 --- a/pkg/apis/enterprise/v1/clustermaster_types.go +++ b/pkg/apis/enterprise/v2/clustermaster_types.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package v1 +package v2 import ( splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -33,6 +33,9 @@ type ClusterMasterSpec struct { // Splunk Smartstore configuration. Refer to indexes.conf.spec and server.conf.spec on docs.splunk.com SmartStore SmartStoreSpec `json:"smartstore,omitempty"` + + // Splunk Enterprise App repository. 
Specifies remote App location and scope for Splunk App management
+	AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"`
 }
 
 // ClusterMasterStatus defines the observed state of ClusterMaster
@@ -51,6 +54,9 @@ type ClusterMasterStatus struct {
 
 	// Resource Revision tracker
 	ResourceRevMap map[string]string `json:"resourceRevMap"`
+
+	// App Framework status
+	AppContext AppDeploymentContext `json:"appContext"`
 }
 
 // BundlePushInfo Indicates if bundle push required
diff --git a/pkg/apis/enterprise/v1/common_types.go b/pkg/apis/enterprise/v2/common_types.go
similarity index 56%
rename from pkg/apis/enterprise/v1/common_types.go
rename to pkg/apis/enterprise/v2/common_types.go
index d6ad95fc1..98ee25bdd 100644
--- a/pkg/apis/enterprise/v1/common_types.go
+++ b/pkg/apis/enterprise/v2/common_types.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package v1
+package v2
 
 import (
 	corev1 "k8s.io/api/core/v1"
@@ -22,7 +22,7 @@ import (
 
 const (
 	// APIVersion is a string representation of this API
-	APIVersion = "enterprise.splunk.com/v1"
+	APIVersion = "enterprise.splunk.com/v2"
 )
 
 // default all fields to being optional
@@ -33,6 +33,37 @@ const (
 // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
 // see also https://book.kubebuilder.io/reference/markers/crd.html
 
+// CAUTION: Do not change the json field tags; otherwise the configuration will not be backward compatible with existing CRs
+
+// AppRepoState represents the App state on the remote store
+type AppRepoState uint8
+
+// Values to represent the App Repo status
+const (
+	RepoStateActive AppRepoState = iota + 1
+	RepoStateDeleted
+	RepoStatePassive
+)
+
+// AppDeploymentStatus represents the status of an App on the Pod
+type AppDeploymentStatus uint8
+
+// Values to represent the Pod App deployment status
+const (
+	// Indicates there is a change on the remote store that has not yet been propagated to the Pod
+	DeployStatusPending AppDeploymentStatus = iota + 1
+
+	// App update on the Pod is in progress
+	// ToDo: Mostly a transient state for Phase-2; more of a Phase-3 status
+	DeployStatusInProgress
+
+	// App update is complete on the Pod
+	DeployStatusComplete
+
+	// Failed to update the App on the Pod
+	DeployStatusError
+)
+
 // CommonSplunkSpec defines the desired state of parameters that are common across all Splunk Enterprise CRD types
 type CommonSplunkSpec struct {
 	splcommon.Spec `json:",inline"`
@@ -78,6 +109,14 @@ type CommonSplunkSpec struct {
 	// ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers
 	// WARNING: Setting environment variables used by Splunk or Ansible will affect Splunk installation and operation
 	ExtraEnv []corev1.EnvVar `json:"extraEnv,omitempty"`
+
+	// ReadinessInitialDelaySeconds defines initialDelaySeconds (see https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for the Readiness probe
+	// Note: If needed, the Operator overrides this with a higher value
+	ReadinessInitialDelaySeconds int32 `json:"readinessInitialDelaySeconds"`
+
+	// LivenessInitialDelaySeconds defines initialDelaySeconds (see https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command) for the Liveness probe
+	// Note: If needed, the Operator overrides this with a higher value
+	LivenessInitialDelaySeconds int32 `json:"livenessInitialDelaySeconds"`
 }
 
 // StorageClassSpec defines storage class configuration
@@ -132,7 +171,7 @@ type IndexConfDefaultsSpec struct {
 	IndexAndGlobalCommonSpec `json:",inline"`
 }
 
-// VolumeSpec defines remote volume name and remote volume URI
+// VolumeSpec defines remote volume config
 type VolumeSpec struct {
 	// Remote volume name
 	Name string `json:"name"`
@@ -145,6 +184,17 @@ type VolumeSpec struct {
 
 	// Secret object name
 	SecretRef string `json:"secretRef"`
+
+	// Remote Storage type. Supported values: s3
+	Type string `json:"storageType"`
+
+	// App Package Remote Store provider. Supported values: aws, minio
+	Provider string `json:"provider"`
+}
+
+// VolumeAndTypeSpec is used to add any custom variables for the volume implementation
+type VolumeAndTypeSpec struct {
+	VolumeSpec `json:",inline"`
 }
 
 // IndexSpec defines Splunk index name and storage path
@@ -181,3 +231,82 @@ type IndexAndCacheManagerCommonSpec struct {
 	// Time period relative to the bucket's age, during which the bloom filter file is protected from cache eviction
 	HotlistBloomFilterRecencyHours uint `json:"hotlistBloomFilterRecencyHours,omitempty"`
 }
+
+// AppSourceDefaultSpec defines config common to the defaults and App Sources
+type AppSourceDefaultSpec struct {
+	// Remote Storage Volume name
+	VolName string `json:"volumeName,omitempty"`
+
+	// Scope of the App deployment: cluster, local. Scope determines whether the App(s) is/are installed locally or cluster-wide
+	Scope string `json:"scope,omitempty"`
+}
+
+// AppSourceSpec defines a list of App package (*.spl, *.tgz) locations on remote volumes
+type AppSourceSpec struct {
+	// Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo
+	Name string `json:"name"`
+
+	// Location relative to the volume path
+	Location string `json:"location"`
+
+	AppSourceDefaultSpec `json:",inline"`
+}
+
+// AppFrameworkSpec defines the application package remote store repository
+type AppFrameworkSpec struct {
+	// Defines the default configuration settings for App sources
+	Defaults AppSourceDefaultSpec `json:"defaults,omitempty"`
+
+	// Interval in seconds to check the Remote Storage for App changes.
+	// The default value for this config is 1 hour (3600 sec),
+	// the minimum value is 1 minute (60 sec) and the maximum value is 1 day (86400 sec).
+	// We assign the value based on the following conditions:
+	// 1. If no value or 0 is specified then it will be defaulted to 1 hour.
+	// 2. If anything less than the min is specified then we set it to 1 min.
+	// 3. If anything more than the max value is specified then we set it to 1 day.
+	AppsRepoPollInterval int64 `json:"appsRepoPollIntervalSeconds,omitempty"`
+
+	// List of remote storage volumes
+	VolList []VolumeSpec `json:"volumes,omitempty"`
+
+	// List of App sources on remote storage
+	AppSources []AppSourceSpec `json:"appSources,omitempty"`
+}
+
+// AppDeploymentInfo represents the deployment information for a single App
+type AppDeploymentInfo struct {
+	AppName          string              `json:"appName"`
+	LastModifiedTime string              `json:"lastModifiedTime,omitempty"`
+	ObjectHash       string              `json:"objectHash"`
+	Size             uint64              `json:"Size,omitempty"`
+	RepoState        AppRepoState        `json:"repoState"`
+	DeployStatus     AppDeploymentStatus `json:"deployStatus"`
+}
+
+// AppSrcDeployInfo represents deployment info for a list of Apps
+type AppSrcDeployInfo struct {
+	AppDeploymentInfoList []AppDeploymentInfo `json:"appDeploymentInfo,omitempty"`
+}
+
+// AppDeploymentContext stores the Apps deployment information
+type AppDeploymentContext struct {
+	// App Framework version info for future use
+	Version uint16 `json:"version"`
+
+	// IsDeploymentInProgress indicates if the Apps deployment is in progress
+	IsDeploymentInProgress bool `json:"isDeploymentInProgress"`
+
+	// List of App package (*.spl, *.tgz) locations on remote volume
+	AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"`
+
+	// Represents the Apps deployment status
+	AppsSrcDeployStatus map[string]AppSrcDeployInfo `json:"appSrcDeployStatus,omitempty"`
+
+	// This is set to the time when we get the list of apps from remote storage.
+	LastAppInfoCheckTime int64 `json:"lastAppInfoCheckTime"`
+
+	// Interval in seconds to check the Remote Storage for App changes.
+	// This is introduced here so that we don't do spec validation in every reconcile just
+	// because the spec and status are different.
+	AppsRepoStatusPollInterval int64 `json:"appsRepoStatusPollIntervalSeconds,omitempty"`
+}
diff --git a/pkg/apis/enterprise/v2/doc.go b/pkg/apis/enterprise/v2/doc.go
new file mode 100644
index 000000000..dab82d089
--- /dev/null
+++ b/pkg/apis/enterprise/v2/doc.go
@@ -0,0 +1,18 @@
+// Copyright (c) 2018-2021 Splunk Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2 contains API Schema definitions for the enterprise v2 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=enterprise.splunk.com
+package v2
diff --git a/pkg/apis/enterprise/v1/indexercluster_types.go b/pkg/apis/enterprise/v2/indexercluster_types.go
similarity index 99%
rename from pkg/apis/enterprise/v1/indexercluster_types.go
rename to pkg/apis/enterprise/v2/indexercluster_types.go
index 84fbe8597..30971bbed 100644
--- a/pkg/apis/enterprise/v1/indexercluster_types.go
+++ b/pkg/apis/enterprise/v2/indexercluster_types.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
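
The appsRepoPollIntervalSeconds doc comment above pins down an exact clamping contract (default 1 hour, floor 1 minute, ceiling 1 day). A minimal standalone Go sketch of how those three rules compose; the clampPollInterval helper and constant names are illustrative only and are not part of this change:

package main

import "fmt"

const (
	defaultPollInterval int64 = 3600  // 1 hour
	minPollInterval     int64 = 60    // 1 minute
	maxPollInterval     int64 = 86400 // 1 day
)

// clampPollInterval applies the documented rules: 0 (or unset) falls back to
// the default, anything below the minimum is raised to the minimum, and
// anything above the maximum is lowered to the maximum.
func clampPollInterval(v int64) int64 {
	switch {
	case v == 0:
		return defaultPollInterval
	case v < minPollInterval:
		return minPollInterval
	case v > maxPollInterval:
		return maxPollInterval
	default:
		return v
	}
}

func main() {
	for _, v := range []int64{0, 30, 7200, 100000} {
		fmt.Println(v, "->", clampPollInterval(v)) // prints 3600, 60, 7200, 86400
	}
}
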
-package v1
+package v2
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/pkg/apis/enterprise/v1/licensemaster_types.go b/pkg/apis/enterprise/v2/licensemaster_types.go
similarity index 91%
rename from pkg/apis/enterprise/v1/licensemaster_types.go
rename to pkg/apis/enterprise/v2/licensemaster_types.go
index 907422197..033ac865a 100644
--- a/pkg/apis/enterprise/v1/licensemaster_types.go
+++ b/pkg/apis/enterprise/v2/licensemaster_types.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package v1
+package v2
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -31,12 +31,18 @@ import (
 // LicenseMasterSpec defines the desired state of a Splunk Enterprise license master.
 type LicenseMasterSpec struct {
 	CommonSplunkSpec `json:",inline"`
+
+	// Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management
+	AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"`
 }
 
 // LicenseMasterStatus defines the observed state of a Splunk Enterprise license master.
 type LicenseMasterStatus struct {
 	// current phase of the license master
 	Phase splcommon.Phase `json:"phase"`
+
+	// App Framework Context
+	AppContext AppDeploymentContext `json:"appContext"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/pkg/apis/enterprise/v2/register.go b/pkg/apis/enterprise/v2/register.go
new file mode 100644
index 000000000..684d3d08b
--- /dev/null
+++ b/pkg/apis/enterprise/v2/register.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2018-2021 Splunk Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTE: Boilerplate only. Ignore this file.
+
+// Package v2 contains API Schema definitions for the enterprise v2 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=enterprise.splunk.com
+package v2
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: "enterprise.splunk.com", Version: "v2"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
diff --git a/pkg/apis/enterprise/v1/searchheadcluster_types.go b/pkg/apis/enterprise/v2/searchheadcluster_types.go
similarity index 95%
rename from pkg/apis/enterprise/v1/searchheadcluster_types.go
rename to pkg/apis/enterprise/v2/searchheadcluster_types.go
index 255d438d2..e548aaf0e 100644
--- a/pkg/apis/enterprise/v1/searchheadcluster_types.go
+++ b/pkg/apis/enterprise/v2/searchheadcluster_types.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
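
register.go above gives the v2 group its GroupVersion and a controller-runtime SchemeBuilder. The sketch below shows how a consumer would register these kinds on a scheme, assuming the v2 *_types.go files register their kinds with this SchemeBuilder as operator-sdk scaffolding normally does; the main wrapper is illustrative only:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2"
)

func main() {
	// In the operator this would be the manager's scheme (mgr.GetScheme());
	// a fresh scheme keeps the sketch self-contained.
	s := runtime.NewScheme()
	if err := enterpriseApi.SchemeBuilder.AddToScheme(s); err != nil {
		fmt.Println("failed to register enterprise v2 types:", err)
		return
	}
	fmt.Println("registered group/version:", enterpriseApi.SchemeGroupVersion.String())
}
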
-package v1 +package v2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +34,9 @@ type SearchHeadClusterSpec struct { // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` + + // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management + AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` } // SearchHeadClusterMemberStatus is used to track the status of each search head cluster member @@ -103,6 +106,9 @@ type SearchHeadClusterStatus struct { // status of each search head cluster member Members []SearchHeadClusterMemberStatus `json:"members"` + + // App Framework Context + AppContext AppDeploymentContext `json:"appContext"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/enterprise/v1/standalone_types.go b/pkg/apis/enterprise/v2/standalone_types.go similarity index 93% rename from pkg/apis/enterprise/v1/standalone_types.go rename to pkg/apis/enterprise/v2/standalone_types.go index d512d554b..f697cc784 100644 --- a/pkg/apis/enterprise/v1/standalone_types.go +++ b/pkg/apis/enterprise/v2/standalone_types.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package v1 +package v2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,6 +37,9 @@ type StandaloneSpec struct { //Splunk Smartstore configuration. Refer to indexes.conf.spec and server.conf.spec on docs.splunk.com SmartStore SmartStoreSpec `json:"smartstore,omitempty"` + + // Splunk Enterprise App repository. Specifies remote App location and scope for Splunk App management + AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` } // StandaloneStatus defines the observed state of a Splunk Enterprise standalone instances. @@ -58,6 +61,9 @@ type StandaloneStatus struct { // Resource Revision tracker ResourceRevMap map[string]string `json:"resourceRevMap"` + + // App Framework Context + AppContext AppDeploymentContext `json:"appContext"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/enterprise/v2/zz_generated.deepcopy.go b/pkg/apis/enterprise/v2/zz_generated.deepcopy.go new file mode 100644 index 000000000..c1e3d0c8f --- /dev/null +++ b/pkg/apis/enterprise/v2/zz_generated.deepcopy.go @@ -0,0 +1,911 @@ +// +build !ignore_autogenerated + +// Code generated by operator-sdk. DO NOT EDIT. + +package v2 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppDeploymentContext) DeepCopyInto(out *AppDeploymentContext) { + *out = *in + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + if in.AppsSrcDeployStatus != nil { + in, out := &in.AppsSrcDeployStatus, &out.AppsSrcDeployStatus + *out = make(map[string]AppSrcDeployInfo, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppDeploymentContext. +func (in *AppDeploymentContext) DeepCopy() *AppDeploymentContext { + if in == nil { + return nil + } + out := new(AppDeploymentContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AppDeploymentInfo) DeepCopyInto(out *AppDeploymentInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppDeploymentInfo. +func (in *AppDeploymentInfo) DeepCopy() *AppDeploymentInfo { + if in == nil { + return nil + } + out := new(AppDeploymentInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppFrameworkSpec) DeepCopyInto(out *AppFrameworkSpec) { + *out = *in + out.Defaults = in.Defaults + if in.VolList != nil { + in, out := &in.VolList, &out.VolList + *out = make([]VolumeSpec, len(*in)) + copy(*out, *in) + } + if in.AppSources != nil { + in, out := &in.AppSources, &out.AppSources + *out = make([]AppSourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppFrameworkSpec. +func (in *AppFrameworkSpec) DeepCopy() *AppFrameworkSpec { + if in == nil { + return nil + } + out := new(AppFrameworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppSourceDefaultSpec) DeepCopyInto(out *AppSourceDefaultSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSourceDefaultSpec. +func (in *AppSourceDefaultSpec) DeepCopy() *AppSourceDefaultSpec { + if in == nil { + return nil + } + out := new(AppSourceDefaultSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppSourceSpec) DeepCopyInto(out *AppSourceSpec) { + *out = *in + out.AppSourceDefaultSpec = in.AppSourceDefaultSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSourceSpec. +func (in *AppSourceSpec) DeepCopy() *AppSourceSpec { + if in == nil { + return nil + } + out := new(AppSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppSrcDeployInfo) DeepCopyInto(out *AppSrcDeployInfo) { + *out = *in + if in.AppDeploymentInfoList != nil { + in, out := &in.AppDeploymentInfoList, &out.AppDeploymentInfoList + *out = make([]AppDeploymentInfo, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppSrcDeployInfo. +func (in *AppSrcDeployInfo) DeepCopy() *AppSrcDeployInfo { + if in == nil { + return nil + } + out := new(AppSrcDeployInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BundlePushInfo) DeepCopyInto(out *BundlePushInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundlePushInfo. +func (in *BundlePushInfo) DeepCopy() *BundlePushInfo { + if in == nil { + return nil + } + out := new(BundlePushInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { + *out = *in + out.IndexAndCacheManagerCommonSpec = in.IndexAndCacheManagerCommonSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CacheManagerSpec. +func (in *CacheManagerSpec) DeepCopy() *CacheManagerSpec { + if in == nil { + return nil + } + out := new(CacheManagerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMaster) DeepCopyInto(out *ClusterMaster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMaster. +func (in *ClusterMaster) DeepCopy() *ClusterMaster { + if in == nil { + return nil + } + out := new(ClusterMaster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterMaster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMasterList) DeepCopyInto(out *ClusterMasterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterMaster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterList. +func (in *ClusterMasterList) DeepCopy() *ClusterMasterList { + if in == nil { + return nil + } + out := new(ClusterMasterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterMasterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterMasterSpec) DeepCopyInto(out *ClusterMasterSpec) { + *out = *in + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.SmartStore.DeepCopyInto(&out.SmartStore) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterSpec. +func (in *ClusterMasterSpec) DeepCopy() *ClusterMasterSpec { + if in == nil { + return nil + } + out := new(ClusterMasterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterMasterStatus) DeepCopyInto(out *ClusterMasterStatus) { + *out = *in + in.SmartStore.DeepCopyInto(&out.SmartStore) + out.BundlePushTracker = in.BundlePushTracker + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.AppContext.DeepCopyInto(&out.AppContext) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterMasterStatus. +func (in *ClusterMasterStatus) DeepCopy() *ClusterMasterStatus { + if in == nil { + return nil + } + out := new(ClusterMasterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonSplunkSpec) DeepCopyInto(out *CommonSplunkSpec) { + *out = *in + in.Spec.DeepCopyInto(&out.Spec) + out.EtcVolumeStorageConfig = in.EtcVolumeStorageConfig + out.VarVolumeStorageConfig = in.VarVolumeStorageConfig + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.LicenseMasterRef = in.LicenseMasterRef + out.ClusterMasterRef = in.ClusterMasterRef + if in.ExtraEnv != nil { + in, out := &in.ExtraEnv, &out.ExtraEnv + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSplunkSpec. +func (in *CommonSplunkSpec) DeepCopy() *CommonSplunkSpec { + if in == nil { + return nil + } + out := new(CommonSplunkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexAndCacheManagerCommonSpec) DeepCopyInto(out *IndexAndCacheManagerCommonSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexAndCacheManagerCommonSpec. +func (in *IndexAndCacheManagerCommonSpec) DeepCopy() *IndexAndCacheManagerCommonSpec { + if in == nil { + return nil + } + out := new(IndexAndCacheManagerCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexAndGlobalCommonSpec) DeepCopyInto(out *IndexAndGlobalCommonSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexAndGlobalCommonSpec. +func (in *IndexAndGlobalCommonSpec) DeepCopy() *IndexAndGlobalCommonSpec { + if in == nil { + return nil + } + out := new(IndexAndGlobalCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexConfDefaultsSpec) DeepCopyInto(out *IndexConfDefaultsSpec) { + *out = *in + out.IndexAndGlobalCommonSpec = in.IndexAndGlobalCommonSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexConfDefaultsSpec. +func (in *IndexConfDefaultsSpec) DeepCopy() *IndexConfDefaultsSpec { + if in == nil { + return nil + } + out := new(IndexConfDefaultsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *IndexSpec) DeepCopyInto(out *IndexSpec) { + *out = *in + out.IndexAndCacheManagerCommonSpec = in.IndexAndCacheManagerCommonSpec + out.IndexAndGlobalCommonSpec = in.IndexAndGlobalCommonSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexSpec. +func (in *IndexSpec) DeepCopy() *IndexSpec { + if in == nil { + return nil + } + out := new(IndexSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexerCluster) DeepCopyInto(out *IndexerCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerCluster. +func (in *IndexerCluster) DeepCopy() *IndexerCluster { + if in == nil { + return nil + } + out := new(IndexerCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IndexerCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexerClusterList) DeepCopyInto(out *IndexerClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IndexerCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterList. +func (in *IndexerClusterList) DeepCopy() *IndexerClusterList { + if in == nil { + return nil + } + out := new(IndexerClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IndexerClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexerClusterMemberStatus) DeepCopyInto(out *IndexerClusterMemberStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterMemberStatus. +func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { + if in == nil { + return nil + } + out := new(IndexerClusterMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { + *out = *in + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. +func (in *IndexerClusterSpec) DeepCopy() *IndexerClusterSpec { + if in == nil { + return nil + } + out := new(IndexerClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { + *out = *in + if in.IndexerSecretChanged != nil { + in, out := &in.IndexerSecretChanged, &out.IndexerSecretChanged + *out = make([]bool, len(*in)) + copy(*out, *in) + } + if in.IdxcPasswordChangedSecrets != nil { + in, out := &in.IdxcPasswordChangedSecrets, &out.IdxcPasswordChangedSecrets + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make([]IndexerClusterMemberStatus, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. +func (in *IndexerClusterStatus) DeepCopy() *IndexerClusterStatus { + if in == nil { + return nil + } + out := new(IndexerClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseMaster) DeepCopyInto(out *LicenseMaster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMaster. +func (in *LicenseMaster) DeepCopy() *LicenseMaster { + if in == nil { + return nil + } + out := new(LicenseMaster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LicenseMaster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseMasterList) DeepCopyInto(out *LicenseMasterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LicenseMaster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMasterList. +func (in *LicenseMasterList) DeepCopy() *LicenseMasterList { + if in == nil { + return nil + } + out := new(LicenseMasterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LicenseMasterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LicenseMasterSpec) DeepCopyInto(out *LicenseMasterSpec) { + *out = *in + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMasterSpec. +func (in *LicenseMasterSpec) DeepCopy() *LicenseMasterSpec { + if in == nil { + return nil + } + out := new(LicenseMasterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LicenseMasterStatus) DeepCopyInto(out *LicenseMasterStatus) { + *out = *in + in.AppContext.DeepCopyInto(&out.AppContext) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseMasterStatus. +func (in *LicenseMasterStatus) DeepCopy() *LicenseMasterStatus { + if in == nil { + return nil + } + out := new(LicenseMasterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchHeadCluster) DeepCopyInto(out *SearchHeadCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadCluster. +func (in *SearchHeadCluster) DeepCopy() *SearchHeadCluster { + if in == nil { + return nil + } + out := new(SearchHeadCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SearchHeadCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchHeadClusterList) DeepCopyInto(out *SearchHeadClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SearchHeadCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterList. +func (in *SearchHeadClusterList) DeepCopy() *SearchHeadClusterList { + if in == nil { + return nil + } + out := new(SearchHeadClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SearchHeadClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchHeadClusterMemberStatus) DeepCopyInto(out *SearchHeadClusterMemberStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterMemberStatus. +func (in *SearchHeadClusterMemberStatus) DeepCopy() *SearchHeadClusterMemberStatus { + if in == nil { + return nil + } + out := new(SearchHeadClusterMemberStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchHeadClusterSpec) DeepCopyInto(out *SearchHeadClusterSpec) { + *out = *in + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterSpec. 
+func (in *SearchHeadClusterSpec) DeepCopy() *SearchHeadClusterSpec { + if in == nil { + return nil + } + out := new(SearchHeadClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SearchHeadClusterStatus) DeepCopyInto(out *SearchHeadClusterStatus) { + *out = *in + if in.ShcSecretChanged != nil { + in, out := &in.ShcSecretChanged, &out.ShcSecretChanged + *out = make([]bool, len(*in)) + copy(*out, *in) + } + if in.AdminSecretChanged != nil { + in, out := &in.AdminSecretChanged, &out.AdminSecretChanged + *out = make([]bool, len(*in)) + copy(*out, *in) + } + if in.AdminPasswordChangedSecrets != nil { + in, out := &in.AdminPasswordChangedSecrets, &out.AdminPasswordChangedSecrets + *out = make(map[string]bool, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Members != nil { + in, out := &in.Members, &out.Members + *out = make([]SearchHeadClusterMemberStatus, len(*in)) + copy(*out, *in) + } + in.AppContext.DeepCopyInto(&out.AppContext) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SearchHeadClusterStatus. +func (in *SearchHeadClusterStatus) DeepCopy() *SearchHeadClusterStatus { + if in == nil { + return nil + } + out := new(SearchHeadClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SmartStoreSpec) DeepCopyInto(out *SmartStoreSpec) { + *out = *in + if in.VolList != nil { + in, out := &in.VolList, &out.VolList + *out = make([]VolumeSpec, len(*in)) + copy(*out, *in) + } + if in.IndexList != nil { + in, out := &in.IndexList, &out.IndexList + *out = make([]IndexSpec, len(*in)) + copy(*out, *in) + } + out.Defaults = in.Defaults + out.CacheManagerConf = in.CacheManagerConf + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SmartStoreSpec. +func (in *SmartStoreSpec) DeepCopy() *SmartStoreSpec { + if in == nil { + return nil + } + out := new(SmartStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Standalone) DeepCopyInto(out *Standalone) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Standalone. +func (in *Standalone) DeepCopy() *Standalone { + if in == nil { + return nil + } + out := new(Standalone) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Standalone) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StandaloneList) DeepCopyInto(out *StandaloneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Standalone, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneList. +func (in *StandaloneList) DeepCopy() *StandaloneList { + if in == nil { + return nil + } + out := new(StandaloneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StandaloneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandaloneSpec) DeepCopyInto(out *StandaloneSpec) { + *out = *in + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.SmartStore.DeepCopyInto(&out.SmartStore) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneSpec. +func (in *StandaloneSpec) DeepCopy() *StandaloneSpec { + if in == nil { + return nil + } + out := new(StandaloneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StandaloneStatus) DeepCopyInto(out *StandaloneStatus) { + *out = *in + in.SmartStore.DeepCopyInto(&out.SmartStore) + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.AppContext.DeepCopyInto(&out.AppContext) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandaloneStatus. +func (in *StandaloneStatus) DeepCopy() *StandaloneStatus { + if in == nil { + return nil + } + out := new(StandaloneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassSpec) DeepCopyInto(out *StorageClassSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassSpec. +func (in *StorageClassSpec) DeepCopy() *StorageClassSpec { + if in == nil { + return nil + } + out := new(StorageClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAndTypeSpec) DeepCopyInto(out *VolumeAndTypeSpec) { + *out = *in + out.VolumeSpec = in.VolumeSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAndTypeSpec. +func (in *VolumeAndTypeSpec) DeepCopy() *VolumeAndTypeSpec { + if in == nil { + return nil + } + out := new(VolumeAndTypeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSpec) DeepCopyInto(out *VolumeSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSpec. 
+func (in *VolumeSpec) DeepCopy() *VolumeSpec { + if in == nil { + return nil + } + out := new(VolumeSpec) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/controller/add_clustermaster.go b/pkg/controller/add_clustermaster.go index 8e916da58..b252f81f8 100644 --- a/pkg/controller/add_clustermaster.go +++ b/pkg/controller/add_clustermaster.go @@ -15,7 +15,7 @@ package controller import ( - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" @@ -39,9 +39,9 @@ type ClusterMasterController struct{} // GetInstance returns an instance of the custom resource managed by the controller func (ctrl ClusterMasterController) GetInstance() splcommon.MetaObject { - return &enterprisev1.ClusterMaster{ + return &enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ - APIVersion: enterprisev1.APIVersion, + APIVersion: enterpriseApi.APIVersion, Kind: "ClusterMaster", }, } @@ -54,6 +54,6 @@ func (ctrl ClusterMasterController) GetWatchTypes() []runtime.Object { // Reconcile is used to perform an idempotent reconciliation of the custom resource managed by this controller func (ctrl ClusterMasterController) Reconcile(client client.Client, cr splcommon.MetaObject) (reconcile.Result, error) { - instance := cr.(*enterprisev1.ClusterMaster) + instance := cr.(*enterpriseApi.ClusterMaster) return enterprise.ApplyClusterMaster(client, instance) } diff --git a/pkg/controller/add_indexercluster.go b/pkg/controller/add_indexercluster.go index beda3ce3e..5ed6fa5e9 100644 --- a/pkg/controller/add_indexercluster.go +++ b/pkg/controller/add_indexercluster.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" @@ -40,9 +40,9 @@ type IndexerClusterController struct{} // GetInstance returns an instance of the custom resource managed by the controller func (ctrl IndexerClusterController) GetInstance() splcommon.MetaObject { - return &enterprisev1.IndexerCluster{ + return &enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ - APIVersion: enterprisev1.APIVersion, + APIVersion: enterpriseApi.APIVersion, Kind: "IndexerCluster", }, } @@ -55,6 +55,6 @@ func (ctrl IndexerClusterController) GetWatchTypes() []runtime.Object { // Reconcile is used to perform an idempotent reconciliation of the custom resource managed by this controller func (ctrl IndexerClusterController) Reconcile(client client.Client, cr splcommon.MetaObject) (reconcile.Result, error) { - instance := cr.(*enterprisev1.IndexerCluster) + instance := cr.(*enterpriseApi.IndexerCluster) return enterprise.ApplyIndexerCluster(client, instance) } diff --git a/pkg/controller/add_licensemaster.go b/pkg/controller/add_licensemaster.go index 78635a013..c858773db 100644 --- a/pkg/controller/add_licensemaster.go +++ b/pkg/controller/add_licensemaster.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - 
enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" @@ -40,9 +40,9 @@ type LicenseMasterController struct{} // GetInstance returns an instance of the custom resource managed by the controller func (ctrl LicenseMasterController) GetInstance() splcommon.MetaObject { - return &enterprisev1.LicenseMaster{ + return &enterpriseApi.LicenseMaster{ TypeMeta: metav1.TypeMeta{ - APIVersion: enterprisev1.APIVersion, + APIVersion: enterpriseApi.APIVersion, Kind: "LicenseMaster", }, } @@ -55,6 +55,6 @@ func (ctrl LicenseMasterController) GetWatchTypes() []runtime.Object { // Reconcile is used to perform an idempotent reconciliation of the custom resource managed by this controller func (ctrl LicenseMasterController) Reconcile(client client.Client, cr splcommon.MetaObject) (reconcile.Result, error) { - instance := cr.(*enterprisev1.LicenseMaster) + instance := cr.(*enterpriseApi.LicenseMaster) return enterprise.ApplyLicenseMaster(client, instance) } diff --git a/pkg/controller/add_searchheadcluster.go b/pkg/controller/add_searchheadcluster.go index 088b2af34..e3b223371 100644 --- a/pkg/controller/add_searchheadcluster.go +++ b/pkg/controller/add_searchheadcluster.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" @@ -40,9 +40,9 @@ type SearchHeadClusterController struct{} // GetInstance returns an instance of the custom resource managed by the controller func (ctrl SearchHeadClusterController) GetInstance() splcommon.MetaObject { - return &enterprisev1.SearchHeadCluster{ + return &enterpriseApi.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ - APIVersion: enterprisev1.APIVersion, + APIVersion: enterpriseApi.APIVersion, Kind: "SearchHeadCluster", }, } @@ -55,6 +55,6 @@ func (ctrl SearchHeadClusterController) GetWatchTypes() []runtime.Object { // Reconcile is used to perform an idempotent reconciliation of the custom resource managed by this controller func (ctrl SearchHeadClusterController) Reconcile(client client.Client, cr splcommon.MetaObject) (reconcile.Result, error) { - instance := cr.(*enterprisev1.SearchHeadCluster) + instance := cr.(*enterpriseApi.SearchHeadCluster) return enterprise.ApplySearchHeadCluster(client, instance) } diff --git a/pkg/controller/add_standalone.go b/pkg/controller/add_standalone.go index 8b6edbcf6..8286fa631 100644 --- a/pkg/controller/add_standalone.go +++ b/pkg/controller/add_standalone.go @@ -22,7 +22,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" @@ -40,9 +40,9 @@ type 
StandaloneController struct{}
 
 // GetInstance returns an instance of the custom resource managed by the controller
 func (ctrl StandaloneController) GetInstance() splcommon.MetaObject {
-	return &enterprisev1.Standalone{
+	return &enterpriseApi.Standalone{
 		TypeMeta: metav1.TypeMeta{
-			APIVersion: enterprisev1.APIVersion,
+			APIVersion: enterpriseApi.APIVersion,
 			Kind:       "Standalone",
 		},
 	}
@@ -55,6 +55,6 @@ func (ctrl StandaloneController) GetWatchTypes() []runtime.Object {
 
 // Reconcile is used to perform an idempotent reconciliation of the custom resource managed by this controller
 func (ctrl StandaloneController) Reconcile(client client.Client, cr splcommon.MetaObject) (reconcile.Result, error) {
-	instance := cr.(*enterprisev1.Standalone)
+	instance := cr.(*enterpriseApi.Standalone)
 	return enterprise.ApplyStandalone(client, instance)
 }
diff --git a/pkg/splunk/client/awss3client.go b/pkg/splunk/client/awss3client.go
new file mode 100644
index 000000000..93d4d26a7
--- /dev/null
+++ b/pkg/splunk/client/awss3client.go
@@ -0,0 +1,209 @@
+// Copyright (c) 2018-2021 Splunk Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"regexp"
+
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
+
+// blank assignment to verify that AWSS3Client implements S3Client
+var _ S3Client = &AWSS3Client{}
+
+// SplunkAWSS3Client is an interface to the AWS S3 client
+type SplunkAWSS3Client interface {
+	ListObjectsV2(options *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error)
+}
+
+// AWSS3Client is a client to implement S3 specific APIs
+type AWSS3Client struct {
+	Region             string
+	BucketName         string
+	AWSAccessKeyID     string
+	AWSSecretAccessKey string
+	Prefix             string
+	StartAfter         string
+	Endpoint           string
+	Client             SplunkAWSS3Client
+}
+
+// regex to extract the region from the s3 endpoint
+var regionRegex = ".*.s3[-,.](?P<region>.*).amazonaws.com"
+
+// GetRegion extracts the region from the endpoint field
+func GetRegion(endpoint string) string {
+	pattern := regexp.MustCompile(regionRegex)
+	if len(pattern.FindStringSubmatch(endpoint)) > 0 {
+		return pattern.FindStringSubmatch(endpoint)[1]
+	}
+	return ""
+}
+
+// InitAWSClientWrapper is a wrapper around InitAWSClientSession
+func InitAWSClientWrapper(region, accessKeyID, secretAccessKey string) interface{} {
+	return InitAWSClientSession(region, accessKeyID, secretAccessKey)
+}
+
+// InitAWSClientSession initializes and returns a client session object
+func InitAWSClientSession(region, accessKeyID, secretAccessKey string) SplunkAWSS3Client {
+	scopedLog := log.WithName("InitAWSClientSession")
+
+	// Enforcing minimum version TLS1.2
+	tr := &http.Transport{
+		TLSClientConfig: &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		},
+	}
+	tr.ForceAttemptHTTP2 = true
+	httpClient := http.Client{Transport: tr}
+
+	sess, err :=
session.NewSession(&aws.Config{ + Region: aws.String(region), + Credentials: credentials.NewStaticCredentials( + accessKeyID, // id + secretAccessKey, // secret + ""), + MaxRetries: aws.Int(3), + HTTPClient: &httpClient, + }) + if err != nil { + scopedLog.Error(err, "Failed to initialize an AWS S3 session.") + return nil + } + + // Create the s3Client + s3Client := s3.New(sess) + + // Validate transport + tlsVersion := "Unknown" + if tr, ok := s3Client.Config.HTTPClient.Transport.(*http.Transport); ok { + tlsVersion = getTLSVersion(tr) + } + + scopedLog.Info("AWS Client Session initialization successful.", "region", region, "TLS Version", tlsVersion) + + return s3Client +} + +// NewAWSS3Client returns an AWS S3 client +func NewAWSS3Client(bucketName string, accessKeyID string, secretAccessKey string, prefix string, startAfter string, endpoint string, fn GetInitFunc) (S3Client, error) { + var s3SplunkClient SplunkAWSS3Client + var err error + region := GetRegion(endpoint) + cl := fn(region, accessKeyID, secretAccessKey) + if cl == nil { + err = fmt.Errorf("Failed to create an AWS S3 client") + return nil, err + } + + s3SplunkClient = cl.(*s3.S3) + + return &AWSS3Client{ + Region: region, + BucketName: bucketName, + AWSAccessKeyID: accessKeyID, + AWSSecretAccessKey: secretAccessKey, + Prefix: prefix, + StartAfter: startAfter, + Endpoint: endpoint, + Client: s3SplunkClient, + }, nil +} + +// RegisterAWSS3Client will add the corresponding function pointer to the map +func RegisterAWSS3Client() { + wrapperObject := GetS3ClientWrapper{GetS3Client: NewAWSS3Client, GetInitFunc: InitAWSClientWrapper} + S3Clients["aws"] = wrapperObject +} + +func getTLSVersion(tr *http.Transport) string { + switch tr.TLSClientConfig.MinVersion { + case tls.VersionTLS10: + return "TLS 1.0" + case tls.VersionTLS11: + return "TLS 1.1" + case tls.VersionTLS12: + return "TLS 1.2" + case tls.VersionTLS13: + return "TLS 1.3" + } + + return "Unknown" +} + +// GetAppsList gets the list of apps from remote storage +func (awsclient *AWSS3Client) GetAppsList() (S3Response, error) { + scopedLog := log.WithName("GetAppsList") + + scopedLog.Info("Getting Apps list", "AWS S3 Bucket", awsclient.BucketName) + s3Resp := S3Response{} + + options := &s3.ListObjectsV2Input{ + Bucket: aws.String(awsclient.BucketName), + Prefix: aws.String(awsclient.Prefix), + StartAfter: aws.String(awsclient.StartAfter), // exclude the directory itself from listing + MaxKeys: aws.Int64(4000), // return up to 4K keys from S3 + Delimiter: aws.String("/"), // limit the listing to 1 level only + } + + client := awsclient.Client + resp, err := client.ListObjectsV2(options) + if err != nil { + scopedLog.Error(err, "Unable to list items in bucket", "AWS S3 Bucket", awsclient.BucketName) + return s3Resp, err + } + + if resp.Contents == nil { + err = fmt.Errorf("Empty objects list in the bucket: %s", awsclient.BucketName) + return s3Resp, err + } + + tmp, err := json.Marshal(resp.Contents) + if err != nil { + scopedLog.Error(err, "Failed to marshal s3 response", "AWS S3 Bucket", awsclient.BucketName) + return s3Resp, err + } + + err = json.Unmarshal(tmp, &(s3Resp.Objects)) + if err != nil { + scopedLog.Error(err, "Failed to unmarshal s3 response", "AWS S3 Bucket", awsclient.BucketName) + return s3Resp, err + } + + return s3Resp, nil +} + +// GetInitContainerImage returns the initContainer image to be used with this s3 client +func (awsclient *AWSS3Client) GetInitContainerImage() string { + return ("amazon/aws-cli") +} + +// GetInitContainerCmd returns the init
container command on a per app source basis to be used by the initContainer +func (awsclient *AWSS3Client) GetInitContainerCmd(endpoint string, bucket string, path string, appSrcName string, appMnt string) []string { + s3AppSrcPath := filepath.Join(bucket, path) + "/" + podSyncPath := filepath.Join(appMnt, appSrcName) + "/" + + return ([]string{fmt.Sprintf("--endpoint-url=%s", endpoint), "s3", "sync", fmt.Sprintf("s3://%s", s3AppSrcPath), podSyncPath}) +} diff --git a/pkg/splunk/client/awss3client_test.go b/pkg/splunk/client/awss3client_test.go new file mode 100644 index 000000000..e38dcc064 --- /dev/null +++ b/pkg/splunk/client/awss3client_test.go @@ -0,0 +1,292 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "reflect" + "testing" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + spltest "github.com/splunk/splunk-operator/pkg/splunk/test" +) + +func TestInitAWSClientWrapper(t *testing.T) { + + awsS3ClientSession := InitAWSClientWrapper("us-west-2", "abcd", "1234") + if awsS3ClientSession == nil { + t.Errorf("We should have got a valid AWS S3 client session object") + } +} + +func TestNewAWSS3Client(t *testing.T) { + + fn := InitAWSClientWrapper + awsS3Client, err := NewAWSS3Client("sample_bucket", "abcd", "xyz", "admin/", "admin", "https://s3.us-west-2.amazonaws.com", fn) + if awsS3Client == nil || err != nil { + t.Errorf("NewAWSS3Client should have returned a valid AWS S3 client.") + } + + // Test for invalid scenario, where we return nil client + fn = func(string, string, string) interface{} { + return nil + } + awsS3Client, err = NewAWSS3Client("sample_bucket", "abcd", "xyz", "admin/", "admin", "https://s3.us-west-2.amazonaws.com", fn) + if err == nil { + t.Errorf("NewAWSS3Client should have returned error.") + } +} + +func TestGetInitContainerImage(t *testing.T) { + awsClient := &AWSS3Client{} + + if awsClient.GetInitContainerImage() != "amazon/aws-cli" { + t.Errorf("Got invalid init container image for AWS client.") + } +} + +func TestGetAWSInitContainerCmd(t *testing.T) { + wantCmd := []string{"--endpoint-url=https://s3.us-west-2.amazonaws.com", "s3", "sync", "s3://sample_bucket/admin/", "/mnt/apps-local/admin/"} + + awsClient := &AWSS3Client{} + gotCmd := awsClient.GetInitContainerCmd("https://s3.us-west-2.amazonaws.com", "sample_bucket", "admin/", "admin", "/mnt/apps-local/") + if !reflect.DeepEqual(wantCmd, gotCmd) { + t.Errorf("Got incorrect Init container cmd") + } +} + +func TestGetAppsListShouldNotFail(t *testing.T) { + + appFrameworkRef := enterpriseApi.AppFrameworkSpec{ + Defaults: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol2", + Scope: "local", + }, + VolList: []enterpriseApi.VolumeSpec{ + { + Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + { + Name: "msos_s2s3_vol2", + Endpoint: 
"https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london2", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + }, + }, + } + + awsClient := &AWSS3Client{} + + Etags := []string{"cc707187b036405f095a8ebb43a782c1", "5055a61b3d1b667a4c3279a381a2e7ae", "19779168370b97d8654424e6c9446dd8"} + Keys := []string{"admin_app.tgz", "security_app.tgz", "authentication_app.tgz"} + Sizes := []int64{10, 20, 30} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[1], + Key: &Keys[1], + LastModified: &randomTime, + Size: &Sizes[1], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[2], + Key: &Keys[2], + LastModified: &randomTime, + Size: &Sizes[2], + StorageClass: &StorageClass, + }, + }, + }, + } + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + var err error + var allSuccess bool = true + for index, appSource := range appFrameworkRef.AppSources { + + vol, err = GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + allSuccess = false + continue + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, NewMockAWSS3Client) + + initFn := func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + cl.Objects = mockAwsObjects[index].Objects + return cl + } + + getClientWrapper.SetS3ClientInitFuncPtr(vol.Name, initFn) + + getS3ClientFn := getClientWrapper.GetS3ClientInitFuncPtr() + awsClient.Client = getS3ClientFn("us-west-2", "abcd", "1234").(spltest.MockAWSS3Client) + + s3Response, err := awsClient.GetAppsList() + if err != nil { + allSuccess = false + continue + } + + var mockResponse spltest.MockAWSS3Client + mockResponse, err = ConvertS3Response(s3Response) + if err != nil { + allSuccess = false + continue + } + + if mockAwsHandler.GotSourceAppListResponseMap == nil { + mockAwsHandler.GotSourceAppListResponseMap = make(map[string]spltest.MockAWSS3Client) + } + + mockAwsHandler.GotSourceAppListResponseMap[appSource.Name] = mockResponse + } + + if allSuccess == false { + t.Errorf("Unable to get apps list for all the app sources") + } + method := "GetAppsList" + mockAwsHandler.CheckAWSS3Response(t, method) +} + +func TestGetAppsListShouldFail(t *testing.T) { + + appFrameworkRef := enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: 
"adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + } + + awsClient := &AWSS3Client{} + + Etag := "cc707187b036405f095a8ebb43a782c1" + Key := "admin_app.tgz" + Size := int64(10) + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etag, + Key: &Key, + LastModified: &randomTime, + Size: &Size, + StorageClass: &StorageClass, + }, + }, + }, + } + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + var err error + + appSource := appFrameworkRef.AppSources[0] + + vol, err = GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + t.Errorf("Unable to get Volume due to error=%s", err) + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, NewMockAWSS3Client) + + initFn := func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + // return empty objects list here to test the negative scenario + return cl + } + + getClientWrapper.SetS3ClientInitFuncPtr(vol.Name, initFn) + + getS3ClientFn := getClientWrapper.GetS3ClientInitFuncPtr() + awsClient.Client = getS3ClientFn("us-west-2", "abcd", "1234").(spltest.MockAWSS3Client) + + _, err = awsClient.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error since we have empty objects in the response") + } + +} diff --git a/pkg/splunk/client/minioclient.go b/pkg/splunk/client/minioclient.go new file mode 100644 index 000000000..bc9fe8699 --- /dev/null +++ b/pkg/splunk/client/minioclient.go @@ -0,0 +1,149 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package client + +import ( + "context" + "fmt" + "strings" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +// blank assignment to verify that MinioClient implements S3Client +var _ S3Client = &MinioClient{} + +// SplunkMinioClient is an interface to the Minio S3 client +type SplunkMinioClient interface { + ListObjects(ctx context.Context, bucketName string, opts minio.ListObjectsOptions) <-chan minio.ObjectInfo +} + +// MinioClient is a client to implement S3 specific APIs +type MinioClient struct { + BucketName string + S3AccessKeyID string + S3SecretAccessKey string + Prefix string + StartAfter string + Endpoint string + Client SplunkMinioClient +} + +// NewMinioClient returns a Minio client +func NewMinioClient(bucketName string, accessKeyID string, secretAccessKey string, prefix string, startAfter string, endpoint string, fn GetInitFunc) (S3Client, error) { + var s3SplunkClient SplunkMinioClient + var err error + + cl := fn(endpoint, accessKeyID, secretAccessKey) + if cl == nil { + err = fmt.Errorf("Failed to create a Minio S3 client") + return nil, err + } + + s3SplunkClient = cl.(*minio.Client) + + return &MinioClient{ + BucketName: bucketName, + S3AccessKeyID: accessKeyID, + S3SecretAccessKey: secretAccessKey, + Prefix: prefix, + StartAfter: startAfter, + Endpoint: endpoint, + Client: s3SplunkClient, + }, nil +} + +// RegisterMinioClient will add the corresponding function pointer to the map +func RegisterMinioClient() { + wrapperObject := GetS3ClientWrapper{GetS3Client: NewMinioClient, GetInitFunc: InitMinioClientWrapper} + S3Clients["minio"] = wrapperObject +} + +// InitMinioClientWrapper is a wrapper around InitMinioClientSession +func InitMinioClientWrapper(appS3Endpoint string, accessKeyID string, secretAccessKey string) interface{} { + return InitMinioClientSession(appS3Endpoint, accessKeyID, secretAccessKey) +} + +// InitMinioClientSession initializes and returns a client session object +func InitMinioClientSession(appS3Endpoint string, accessKeyID string, secretAccessKey string) SplunkMinioClient { + scopedLog := log.WithName("InitMinioClientSession") + + // Check if SSL is needed + useSSL := true + if strings.HasPrefix(appS3Endpoint, "http://") { + // We should always use a secure SSL endpoint, so we won't set useSSL = false + scopedLog.Info("Using insecure endpoint, forcing useSSL=true for Minio Client Session", "appS3Endpoint", appS3Endpoint) + appS3Endpoint = strings.TrimPrefix(appS3Endpoint, "http://") + } else if strings.HasPrefix(appS3Endpoint, "https://") { + appS3Endpoint = strings.TrimPrefix(appS3Endpoint, "https://") + } else { + // Unsupported endpoint + scopedLog.Info("Unsupported endpoint for Minio S3 client", "appS3Endpoint", appS3Endpoint) + return nil + } + + // New returns a Minio-compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value.
+ scopedLog.Info("Connecting to Minio S3 for apps", "appS3Endpoint", appS3Endpoint) + s3Client, err := minio.New(appS3Endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKeyID, secretAccessKey, ""), + Secure: useSSL, + }) + if err != nil { + scopedLog.Info("Error creating new Minio Client Session", "err", err) + return nil + } + + return s3Client +} + +// GetAppsList gets the list of apps from remote storage +func (client *MinioClient) GetAppsList() (S3Response, error) { + scopedLog := log.WithName("GetAppsList") + + scopedLog.Info("Getting Apps list", "S3 Bucket", client.BucketName) + s3Resp := S3Response{} + s3Client := client.Client + + // Create a bucket list command for all files in bucket + opts := minio.ListObjectsOptions{ + UseV1: true, + Prefix: client.Prefix, + Recursive: false, + } + + // List all objects from a bucket-name with a matching prefix. + for object := range s3Client.ListObjects(context.Background(), client.BucketName, opts) { + if object.Err != nil { + scopedLog.Info("Got an object error", "object.Err", object.Err, "client.BucketName", client.BucketName) + return s3Resp, nil + } + scopedLog.Info("Got an object", "object", object) + s3Resp.Objects = append(s3Resp.Objects, &RemoteObject{Etag: &object.ETag, Key: &object.Key, LastModified: &object.LastModified, Size: &object.Size, StorageClass: &object.StorageClass}) + } + + return s3Resp, nil +} + +// GetInitContainerImage returns the initContainer image to be used with this s3 client +func (client *MinioClient) GetInitContainerImage() string { + return ("amazon/aws-cli") +} + +// GetInitContainerCmd returns the init container command on a per app source basis to be used by the initContainer +func (client *MinioClient) GetInitContainerCmd(endpoint string, bucket string, path string, appSrcName string, appMnt string) []string { + return ([]string{fmt.Sprintf("--endpoint-url=%s", endpoint), "s3", "sync", fmt.Sprintf("s3://%s/%s", bucket, path), fmt.Sprintf("%s/%s", appMnt, appSrcName)}) +} diff --git a/pkg/splunk/client/minioclient_test.go b/pkg/splunk/client/minioclient_test.go new file mode 100644 index 000000000..902498907 --- /dev/null +++ b/pkg/splunk/client/minioclient_test.go @@ -0,0 +1,69 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "reflect" + "testing" +) + +func TestInitMinioClientWrapper(t *testing.T) { + + minioS3ClientSession := InitMinioClientWrapper("https://s3.us-east-1.amazonaws.com", "abcd", "1234") + if minioS3ClientSession == nil { + t.Errorf("We should have got a valid Minio S3 client object") + } +} + +func TestNewMinioClient(t *testing.T) { + + fn := InitMinioClientWrapper + + // Test1. Test for endpoint with https + minioS3Client, err := NewMinioClient("sample_bucket", "abcd", "xyz", "admin/", "admin", "https://s3.us-west-2.amazonaws.com", fn) + if minioS3Client == nil || err != nil { + t.Errorf("NewMinioClient should have returned a valid Minio S3 client.") + } + + // Test2.
Test for endpoint with http + minioS3Client, err = NewMinioClient("sample_bucket", "abcd", "xyz", "admin/", "admin", "http://s3.us-west-2.amazonaws.com", fn) + if minioS3Client == nil || err != nil { + t.Errorf("NewMinioClient should have returned a valid Minio S3 client.") + } + + // Test3. Test for invalid endpoint + minioS3Client, err = NewMinioClient("sample_bucket", "abcd", "xyz", "admin/", "admin", "random-endpoint.com", fn) + if minioS3Client != nil || err == nil { + t.Errorf("NewMinioClient should have returned an error.") + } +} + +func TestMinioGetInitContainerImage(t *testing.T) { + minioClient := &MinioClient{} + + if minioClient.GetInitContainerImage() != "amazon/aws-cli" { + t.Errorf("Got invalid init container image for Minio client.") + } +} + +func TestGetMinioInitContainerCmd(t *testing.T) { + wantCmd := []string{"--endpoint-url=https://s3.us-west-2.amazonaws.com", "s3", "sync", "s3://sample_bucket/admin", "/mnt/apps-local//admin"} + + minioClient := &MinioClient{} + gotCmd := minioClient.GetInitContainerCmd("https://s3.us-west-2.amazonaws.com", "sample_bucket", "admin", "admin", "/mnt/apps-local/") + if !reflect.DeepEqual(wantCmd, gotCmd) { + t.Errorf("Got incorrect Init container cmd") + } +} diff --git a/pkg/splunk/client/s3client.go b/pkg/splunk/client/s3client.go new file mode 100644 index 000000000..ec6c50f5d --- /dev/null +++ b/pkg/splunk/client/s3client.go @@ -0,0 +1,96 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
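The two init-container tests above also document a subtle difference between the providers: the AWS variant builds its sync paths with filepath.Join, while the Minio variant concatenates with "/", which is where the double slash in wantCmd ("/mnt/apps-local//admin") comes from. A sketch of the resulting command, written as a hypothetical Go example in package client (illustrative values; assumes "fmt" and "strings" are imported):

func ExampleMinioClient_GetInitContainerCmd() {
	c := &MinioClient{}
	args := c.GetInitContainerCmd(
		"https://s3.us-west-2.amazonaws.com", // endpoint
		"sample_bucket",                      // bucket
		"admin",                              // path within the bucket
		"admin",                              // app source name
		"/mnt/apps-local/",                   // app mount point inside the pod
	)
	// Joined together, these are the arguments handed to the amazon/aws-cli init container.
	fmt.Println(strings.Join(args, " "))
	// Output: --endpoint-url=https://s3.us-west-2.amazonaws.com s3 sync s3://sample_bucket/admin /mnt/apps-local//admin
}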
+ +package client + +import ( + "fmt" + "time" +) + +// GetS3ClientWrapper is a wrapper around the S3 client and init function pointers +type GetS3ClientWrapper struct { + GetS3Client + GetInitFunc +} + +// SetS3ClientFuncPtr sets the GetS3Client function pointer member of GetS3ClientWrapper struct +func (c *GetS3ClientWrapper) SetS3ClientFuncPtr(volName string, fn GetS3Client) { + c.GetS3Client = fn + S3Clients[volName] = *c +} + +// GetS3ClientFuncPtr gets the GetS3Client function pointer member of GetS3ClientWrapper struct +func (c *GetS3ClientWrapper) GetS3ClientFuncPtr() GetS3Client { + return c.GetS3Client +} + +// SetS3ClientInitFuncPtr sets the GetInitFunc function pointer member of GetS3ClientWrapper struct +func (c *GetS3ClientWrapper) SetS3ClientInitFuncPtr(volName string, fn GetInitFunc) { + c.GetInitFunc = fn + S3Clients[volName] = *c +} + +// GetS3ClientInitFuncPtr gets the GetInitFunc function pointer member of GetS3ClientWrapper struct +func (c *GetS3ClientWrapper) GetS3ClientInitFuncPtr() GetInitFunc { + return c.GetInitFunc +} + +// GetInitFunc is the init function type, which returns a new S3 session client object +type GetInitFunc func(string, string, string) interface{} + +// GetS3Client gets the required S3Client based on the provider +type GetS3Client func(string /* bucket */, string, /* AWS access key ID */ + string /* AWS secret access key */, string /* Prefix */, string /* StartAfter */, string /* Endpoint */, GetInitFunc) (S3Client, error) + +// S3Clients is a map of provider name to client wrapper +var S3Clients = make(map[string]GetS3ClientWrapper) + +// S3Client is an interface to implement different S3 client APIs +type S3Client interface { + GetAppsList() (S3Response, error) + GetInitContainerImage() string + GetInitContainerCmd(string /* endpoint */, string /* bucket */, string /* path */, string /* app src name */, string /* app mnt */) []string +} + +// SplunkS3Client is a simple object used to connect to S3 +type SplunkS3Client struct { + Client S3Client +} + +// S3Response struct contains list of RemoteObject objects as part of S3 response +type S3Response struct { + Objects []*RemoteObject +} + +// RemoteObject struct contains contents returned as part of S3 response +type RemoteObject struct { + Etag *string + Key *string + LastModified *time.Time + Size *int64 + StorageClass *string +} + +// RegisterS3Client registers the respective client +func RegisterS3Client(provider string) { + switch provider { + case "aws": + RegisterAWSS3Client() + case "minio": + RegisterMinioClient() + default: + fmt.Println("ERROR: Invalid provider specified: ", provider) + } +} diff --git a/pkg/splunk/client/s3client_test.go b/pkg/splunk/client/s3client_test.go new file mode 100644 index 000000000..306a9d04d --- /dev/null +++ b/pkg/splunk/client/s3client_test.go @@ -0,0 +1,74 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
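Putting the registry together: a caller registers a provider once, then resolves the wrapper from S3Clients and uses its two function pointers to construct a concrete client. A hedged sketch of that flow in package client (listAdminApps is a hypothetical helper; bucket, credential, and endpoint values are illustrative placeholders):

func listAdminApps() (S3Response, error) {
	RegisterS3Client("aws") // installs NewAWSS3Client and InitAWSClientWrapper under "aws"

	wrapper := S3Clients["aws"]
	c, err := wrapper.GetS3ClientFuncPtr()(
		"testbucket-rs-london",               // bucket
		"accessKeyID", "secretAccessKey",     // credentials (placeholders)
		"adminAppsRepo/",                     // prefix to list under
		"adminAppsRepo",                      // startAfter, to skip the directory itself
		"https://s3-eu-west-2.amazonaws.com", // endpoint
		wrapper.GetS3ClientInitFuncPtr(),     // session initializer
	)
	if err != nil {
		return S3Response{}, err
	}
	return c.GetAppsList()
}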
+ +package client + +import ( + "testing" +) + +func TestRegisterS3Client(t *testing.T) { + + // clear any stale entries present in the S3Clients map + for k := range S3Clients { + delete(S3Clients, k) + } + + // 1. Test for aws + RegisterS3Client("aws") + if len(S3Clients) == 0 { + t.Errorf("We should have initialized the client for aws.") + } + + // 2. Test for minio + RegisterS3Client("minio") + if len(S3Clients) == 1 { + t.Errorf("We should have initialized the client for minio as well.") + } + + // 3. Test for invalid provider + RegisterS3Client("invalid") + if len(S3Clients) > 2 { + t.Errorf("We should only have initialized the client for aws and minio and not for an invalid provider.") + } + +} + +func TestGetSetS3ClientFuncPtr(t *testing.T) { + c := &GetS3ClientWrapper{} + + fn := c.GetS3ClientFuncPtr() + if fn != nil { + t.Errorf("We should have received a nil function pointer") + } + + c.SetS3ClientFuncPtr("aws", NewAWSS3Client) + if c.GetS3Client == nil { + t.Errorf("We should have set GetS3Client func pointer for AWS client.") + } +} + +func TestGetSetS3ClientInitFuncPtr(t *testing.T) { + c := &GetS3ClientWrapper{} + + fn := c.GetS3ClientInitFuncPtr() + if fn != nil { + t.Errorf("We should have received a nil init function pointer") + } + + c.SetS3ClientInitFuncPtr("aws", InitAWSClientWrapper) + if c.GetInitFunc == nil { + t.Errorf("We should have set GetInitFunc func pointer for AWS client.") + } +} diff --git a/pkg/splunk/client/util.go b/pkg/splunk/client/util.go new file mode 100644 index 000000000..7415e5416 --- /dev/null +++ b/pkg/splunk/client/util.go @@ -0,0 +1,110 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "encoding/json" + "fmt" + + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + spltest "github.com/splunk/splunk-operator/pkg/splunk/test" +) + +// NewMockAWSS3Client returns an AWS S3 mock client for testing. +// Ideally this function should live in the test package, but due to +// the dependency on some variables in the client package, and to avoid +// a cyclic dependency, it has to live here.
+func NewMockAWSS3Client(bucketName string, accessKeyID string, secretAccessKey string, prefix string, startAfter string, endpoint string, fn GetInitFunc) (S3Client, error) { + var s3SplunkClient SplunkAWSS3Client + var err error + region := GetRegion(endpoint) + + cl := fn(region, accessKeyID, secretAccessKey) + if cl == nil { + err = fmt.Errorf("Failed to create an AWS S3 client") + return nil, err + } + + s3SplunkClient = cl.(SplunkAWSS3Client) + + return &AWSS3Client{ + Region: region, + BucketName: bucketName, + AWSAccessKeyID: accessKeyID, + AWSSecretAccessKey: secretAccessKey, + Prefix: prefix, + StartAfter: startAfter, + Endpoint: endpoint, + Client: s3SplunkClient, + }, nil +} + +// ConvertS3Response converts S3 Response to a mock client response +func ConvertS3Response(s3Response S3Response) (spltest.MockAWSS3Client, error) { + scopedLog := log.WithName("ConvertS3Response") + + var mockResponse spltest.MockAWSS3Client + + tmp, err := json.Marshal(s3Response) + if err != nil { + scopedLog.Error(err, "Unable to marshal s3 response") + return mockResponse, err + } + + err = json.Unmarshal(tmp, &mockResponse) + if err != nil { + scopedLog.Error(err, "Unable to unmarshal s3 response") + return mockResponse, err + } + + return mockResponse, err +} + +// CheckIfVolumeExists checks if the volume is configured or not +func CheckIfVolumeExists(volumeList []enterpriseApi.VolumeSpec, volName string) (int, error) { + for i, volume := range volumeList { + if volume.Name == volName { + return i, nil + } + } + + return -1, fmt.Errorf("Volume: %s, doesn't exist", volName) +} + +// GetAppSrcVolume gets the volume definition for an app source +func GetAppSrcVolume(appSource enterpriseApi.AppSourceSpec, appFrameworkRef *enterpriseApi.AppFrameworkSpec) (enterpriseApi.VolumeSpec, error) { + var volName string + var index int + var err error + var vol enterpriseApi.VolumeSpec + + scopedLog := log.WithName("GetAppSrcVolume") + + // get the volume spec from the volume name + if appSource.VolName != "" { + volName = appSource.VolName + } else { + volName = appFrameworkRef.Defaults.VolName + } + + index, err = CheckIfVolumeExists(appFrameworkRef.VolList, volName) + if err != nil { + scopedLog.Error(err, "Invalid volume name provided. Please specify a valid volume name.", "App source", appSource.Name, "Volume name", volName) + return vol, err + } + + vol = appFrameworkRef.VolList[index] + return vol, err +} diff --git a/pkg/splunk/client/util_test.go b/pkg/splunk/client/util_test.go new file mode 100644 index 000000000..b047b5bef --- /dev/null +++ b/pkg/splunk/client/util_test.go @@ -0,0 +1,156 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
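The defaults fallback in GetAppSrcVolume is the subtle part: an app source only needs its own VolName when it overrides the AppFramework-level default. A small sketch, written as a hypothetical Go example in package client (field values are illustrative; assumes "fmt" is imported):

func ExampleGetAppSrcVolume() {
	appFramework := enterpriseApi.AppFrameworkSpec{
		Defaults: enterpriseApi.AppSourceDefaultSpec{VolName: "vol2", Scope: "local"},
		VolList: []enterpriseApi.VolumeSpec{
			{Name: "vol1", Provider: "aws"},
			{Name: "vol2", Provider: "minio"},
		},
		AppSources: []enterpriseApi.AppSourceSpec{
			// No VolName set here, so resolution falls back to Defaults.VolName.
			{Name: "securityApps", Location: "securityAppsRepo"},
		},
	}

	vol, _ := GetAppSrcVolume(appFramework.AppSources[0], &appFramework)
	fmt.Println(vol.Name) // the volume named by the defaults
	// Output: vol2
}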
+ +package client + +import ( + "reflect" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + spltest "github.com/splunk/splunk-operator/pkg/splunk/test" +) + +func TestCheckIfVolumeExists(t *testing.T) { + SmartStoreConfig := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, + }, + IndexList: []enterpriseApi.IndexSpec{ + {Name: "salesdata1", RemotePath: "remotepath1", + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ + VolName: "msos_s2s3_vol"}, + }, + {Name: "salesdata2", RemotePath: "remotepath2", + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ + VolName: "msos_s2s3_vol"}, + }, + {Name: "salesdata3", RemotePath: "remotepath3", + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ + VolName: "msos_s2s3_vol"}, + }, + }, + } + + // A volume that doesn't exist should error out + _, err := CheckIfVolumeExists(SmartStoreConfig.VolList, "random_volume_name") + + if err == nil { + t.Errorf("if the volume doesn't exist, an error should be reported") + } + + // A volume that exists should not error out + index := len(SmartStoreConfig.VolList) - 1 + returnedIndex, err := CheckIfVolumeExists(SmartStoreConfig.VolList, SmartStoreConfig.VolList[index].Name) + + if err != nil { + t.Errorf("existing volume should not error out. index id: %d, error: %s", index, err.Error()) + } else if index != returnedIndex { + t.Errorf("Expected index: %d, but returned index id: %d", index, returnedIndex) + } +} + +func TestNewMockAWSS3Client(t *testing.T) { + + // Test 1. Test the valid case + initFn := func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + return cl + } + _, err := NewMockAWSS3Client("sample_bucket", "abcd", "1234", "admin/", "admin", "https://s3.us-west-2.amazonaws.com", initFn) + if err != nil { + t.Errorf("NewMockAWSS3Client should have returned a Mock AWS client.") + } + + // Test 2.
Test the invalid case by returning nil client + initFn = func(region, accessKeyID, secretAccessKey string) interface{} { + return nil + } + _, err = NewMockAWSS3Client("sample_bucket", "abcd", "1234", "admin/", "admin", "https://s3.us-west-2.amazonaws.com", initFn) + if err == nil { + t.Errorf("NewMockAWSS3Client should have returned an error since we passed nil client in init function.") + } +} + +func TestGetVolume(t *testing.T) { + appFrameworkRef := enterpriseApi.AppFrameworkSpec{ + AppsRepoPollInterval: 60, + Defaults: enterpriseApi.AppSourceDefaultSpec{ + VolName: "vol2", + Scope: "cluster", + }, + + VolList: []enterpriseApi.VolumeSpec{ + { + Name: "vol1", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + { + Name: "vol2", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london-2", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + { + Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "vol1", + Scope: "local", + }, + }, + { + Name: "securityApps", + Location: "securityAppsRepo", + }, + }, + } + + // test for valid volumes + for index, appSource := range appFrameworkRef.AppSources { + vol, err := GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + t.Errorf("GetVolume should not have returned an error") + } + + if !reflect.DeepEqual(vol, appFrameworkRef.VolList[index]) { + t.Errorf("returned volume spec is not correct") + } + } + + // test for an invalid volume + appFrameworkRef.AppSources = []enterpriseApi.AppSourceSpec{ + { + Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "invalid_volume", + Scope: "local", + }, + }, + } + + _, err := GetAppSrcVolume(appFrameworkRef.AppSources[0], &appFrameworkRef) + if err == nil { + t.Errorf("GetVolume should have returned an error for an invalid volume name") + } +} diff --git a/pkg/splunk/common/names.go b/pkg/splunk/common/names.go index bb0c23952..7bc7d3916 100644 --- a/pkg/splunk/common/names.go +++ b/pkg/splunk/common/names.go @@ -70,6 +70,19 @@ const ( // SortFieldKey represents field name Key for sorting SortFieldKey = "Key" + + // AppFramework-specific polling intervals + // If these values change, make sure to also update the comment in + // the AppFramework spec section about the polling intervals.
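+ // Values below the minimum are raised to it, values above the maximum are + // lowered to it, and 0/unset falls back to the default.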
+ + // DefaultAppsRepoPollInterval sets the polling interval to one hour + DefaultAppsRepoPollInterval int64 = 60 * 60 + + // MinAppsRepoPollInterval sets the polling interval to one minute + MinAppsRepoPollInterval int64 = 60 + + // MaxAppsRepoPollInterval sets the polling interval to one day + MaxAppsRepoPollInterval int64 = 60 * 60 * 24 ) // GetVersionedSecretName returns a versioned secret name diff --git a/pkg/splunk/controller/configmap.go b/pkg/splunk/controller/configmap.go index 2953d3e08..ee3dd2fbf 100644 --- a/pkg/splunk/controller/configmap.go +++ b/pkg/splunk/controller/configmap.go @@ -19,6 +19,7 @@ import ( "reflect" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" @@ -75,3 +76,16 @@ func GetConfigMapResourceVersion(client splcommon.ControllerClient, namespacedNa } return configMap.ResourceVersion, nil } + +// PrepareConfigMap prepares and returns a K8s ConfigMap object for the given data +func PrepareConfigMap(configMapName, namespace string, dataMap map[string]string) *corev1.ConfigMap { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: namespace, + }, + } + configMap.Data = dataMap + + return configMap +} diff --git a/pkg/splunk/controller/configmap_test.go b/pkg/splunk/controller/configmap_test.go index df463732d..d44b5da9f 100644 --- a/pkg/splunk/controller/configmap_test.go +++ b/pkg/splunk/controller/configmap_test.go @@ -15,6 +15,7 @@ package controller import ( + "reflect" "testing" corev1 "k8s.io/api/core/v1" @@ -101,3 +102,26 @@ func TestGetConfigMapResourceVersion(t *testing.T) { t.Errorf("Should not return an error, when the configMap exists") } } + +func TestPrepareConfigMap(t *testing.T) { + var configMapName = "testConfigMap" + var namespace = "testNameSpace" + expectedCm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: namespace, + }, + } + + dataMap := make(map[string]string) + dataMap["a"] = "x" + dataMap["b"] = "y" + dataMap["z"] = "z" + expectedCm.Data = dataMap + + returnedCM := PrepareConfigMap(configMapName, namespace, dataMap) + + if !reflect.DeepEqual(expectedCm, returnedCM) { + t.Errorf("configMap preparation failed") + } +} diff --git a/pkg/splunk/controller/statefulset.go b/pkg/splunk/controller/statefulset.go index b70814168..d4a4496e5 100644 --- a/pkg/splunk/controller/statefulset.go +++ b/pkg/splunk/controller/statefulset.go @@ -282,3 +282,17 @@ func GetStatefulSetByName(c splcommon.ControllerClient, namespacedName types.Nam return &statefulset, nil } + +// IsStatefulSetScalingUp checks if we are currently scaling up +func IsStatefulSetScalingUp(client splcommon.ControllerClient, cr splcommon.MetaObject, name string, desiredReplicas int32) (bool, error) { + scopedLog := log.WithName("IsStatefulSetScalingUp").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + + namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: name} + current, err := GetStatefulSetByName(client, namespacedName) + if err != nil { + scopedLog.Error(err, "Unable to get current stateful set", "name", namespacedName) + return false, err + } + + return *current.Spec.Replicas < desiredReplicas, nil +} diff --git a/pkg/splunk/controller/statefulset_test.go b/pkg/splunk/controller/statefulset_test.go index 353d812b7..aa7101e4c 100644 --- a/pkg/splunk/controller/statefulset_test.go +++ b/pkg/splunk/controller/statefulset_test.go @@ -26,7 +26,7 @@
import ( spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" ) @@ -147,7 +147,7 @@ func TestUpdateStatefulSetPods(t *testing.T) { } func TestSetStatefulSetOwnerRef(t *testing.T) { - cr := enterprisev1.Standalone{ + cr := enterpriseApi.Standalone{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -208,3 +208,39 @@ func TestGetStatefulSetByName(t *testing.T) { t.Errorf(err.Error()) } } + +func TestIsStatefulSetScalingUp(t *testing.T) { + var replicas int32 = 1 + statefulSetName := "splunk-stand1-standalone" + + cr := enterpriseApi.Standalone{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stand1", + Namespace: "test", + }, + } + + current := &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: statefulSetName, + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Replicas: &replicas, + }, + } + + c := spltest.NewMockClient() + + *current.Spec.Replicas = 2 + _, err := IsStatefulSetScalingUp(c, &cr, statefulSetName, replicas) + if err == nil { + t.Errorf("IsStatefulSetScalingUp should have returned an error as we have not yet added the statefulset to the client.") + } + + c.AddObject(current) + _, err = IsStatefulSetScalingUp(c, &cr, statefulSetName, replicas) + if err != nil { + t.Errorf("IsStatefulSetScalingUp should not have returned an error") + } +} diff --git a/pkg/splunk/enterprise/clustermaster.go b/pkg/splunk/enterprise/clustermaster.go index 1040561b9..936aa7c1d 100644 --- a/pkg/splunk/enterprise/clustermaster.go +++ b/pkg/splunk/enterprise/clustermaster.go @@ -20,7 +20,7 @@ import ( "reflect" "time" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" appsv1 "k8s.io/api/apps/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -32,7 +32,7 @@ import ( ) // ApplyClusterMaster reconciles the state of a Splunk Enterprise cluster master. -func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterprisev1.ClusterMaster) (reconcile.Result, error) { +func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -83,6 +83,16 @@ func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterprisev1.Clus } }() + // If the app framework is configured, then do the following: + // 1. Initialize the S3Clients based on providers + // 2. Check the status of apps on remote storage.
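+ // (A non-empty AppSources list is what marks the app framework as configured.)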
+ if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { + err := initAndCheckAppInfoStatus(client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + if err != nil { + return result, err + } + } + // create or update general config resources _, err = ApplySplunkConfig(client, cr, cr.Spec.CommonSplunkSpec, SplunkIndexer) if err != nil { @@ -132,6 +142,10 @@ func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterprisev1.Clus // no need to requeue if everything is ready if cr.Status.Phase == splcommon.PhaseReady { + if cr.Status.AppContext.AppsSrcDeployStatus != nil { + markAppsStatusToComplete(cr.Status.AppContext.AppsSrcDeployStatus) + } + err = ApplyMonitoringConsole(client, cr, cr.Spec.CommonSplunkSpec, getClusterMasterExtraEnv(cr, &cr.Spec.CommonSplunkSpec)) if err != nil { return result, err @@ -145,41 +159,59 @@ } if cr.Status.BundlePushTracker.NeedToPushMasterApps == false { - result.Requeue = false + // Requeue the reconcile after the polling interval if lastAppInfoCheckTime has been set. + if cr.Status.AppContext.LastAppInfoCheckTime != 0 { + result.RequeueAfter = GetNextRequeueTime(cr.Status.AppContext.AppsRepoStatusPollInterval, cr.Status.AppContext.LastAppInfoCheckTime) + } else { + result.Requeue = false + } } } return result, nil } // validateClusterMasterSpec checks validity and makes default updates to a ClusterMasterSpec, and returns an error if something is wrong. -func validateClusterMasterSpec(cr *enterprisev1.ClusterMaster) error { - err := ValidateSplunkSmartstoreSpec(&cr.Spec.SmartStore) - if err != nil { - return err +func validateClusterMasterSpec(cr *enterpriseApi.ClusterMaster) error { + + if !reflect.DeepEqual(cr.Status.SmartStore, cr.Spec.SmartStore) { + err := ValidateSplunkSmartstoreSpec(&cr.Spec.SmartStore) + if err != nil { + return err + } + } + + if !reflect.DeepEqual(cr.Status.AppContext.AppFrameworkConfig, cr.Spec.AppFrameworkConfig) { + err := ValidateAppFrameworkSpec(&cr.Spec.AppFrameworkConfig, &cr.Status.AppContext, false) + if err != nil { + return err + } } return validateCommonSplunkSpec(&cr.Spec.CommonSplunkSpec) } // getClusterMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise cluster master.
-func getClusterMasterStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.ClusterMaster) (*appsv1.StatefulSet, error) { +func getClusterMasterStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) (*appsv1.StatefulSet, error) { var extraEnvVar []corev1.EnvVar ss, err := getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkClusterMaster, 1, extraEnvVar) if err != nil { return ss, err } - _, exists := getSmartstoreConfigMap(client, cr, SplunkClusterMaster) + smartStoreConfigMap := getSmartstoreConfigMap(client, cr, SplunkClusterMaster) - if exists { + if smartStoreConfigMap != nil { setupInitContainer(&ss.Spec.Template, cr.Spec.Image, cr.Spec.ImagePullPolicy, commandForCMSmartstore) } + // Setup App framework init containers + setupAppInitContainers(client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) + return ss, err } // CheckIfsmartstoreConfigMapUpdatedToPod checks if the smartstore configMap is updated on Pod or not -func CheckIfsmartstoreConfigMapUpdatedToPod(c splcommon.ControllerClient, cr *enterprisev1.ClusterMaster) error { +func CheckIfsmartstoreConfigMapUpdatedToPod(c splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) error { scopedLog := log.WithName("CheckIfsmartstoreConfigMapUpdatedToPod").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) masterIdxcName := cr.GetName() @@ -191,9 +223,9 @@ func CheckIfsmartstoreConfigMapUpdatedToPod(c splcommon.ControllerClient, cr *en return fmt.Errorf("Failed to check config token value on pod. stdout=%s, stderror=%s, error=%v", stdOut, stdErr, err) } - configMap, exists := getSmartstoreConfigMap(c, cr, SplunkClusterMaster) - if exists { - tokenFromConfigMap := configMap.Data[configToken] + smartStoreConfigMap := getSmartstoreConfigMap(c, cr, SplunkClusterMaster) + if smartStoreConfigMap != nil { + tokenFromConfigMap := smartStoreConfigMap.Data[configToken] if tokenFromConfigMap == stdOut { scopedLog.Info("Token Matched.", "on Pod=", stdOut, "from configMap=", tokenFromConfigMap) return nil @@ -206,7 +238,7 @@ func CheckIfsmartstoreConfigMapUpdatedToPod(c splcommon.ControllerClient, cr *en } // PerformCmBundlePush initiates the bundle push from cluster master -func PerformCmBundlePush(c splcommon.ControllerClient, cr *enterprisev1.ClusterMaster) error { +func PerformCmBundlePush(c splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) error { if cr.Status.BundlePushTracker.NeedToPushMasterApps == false { return nil } @@ -244,7 +276,7 @@ func PerformCmBundlePush(c splcommon.ControllerClient, cr *enterprisev1.ClusterM } // PushMasterAppsBundle issues the REST command to for cluster master bundle push -func PushMasterAppsBundle(c splcommon.ControllerClient, cr *enterprisev1.ClusterMaster) error { +func PushMasterAppsBundle(c splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) error { scopedLog := log.WithName("PushMasterApps").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) defaultSecretObjName := splcommon.GetNamespaceScopedSecretName(cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/clustermaster_test.go b/pkg/splunk/enterprise/clustermaster_test.go index d03e635d3..0ca4deab0 100644 --- a/pkg/splunk/enterprise/clustermaster_test.go +++ b/pkg/splunk/enterprise/clustermaster_test.go @@ -25,7 +25,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi 
"github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -41,6 +42,7 @@ func TestApplyClusterMaster(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-cluster-master-secret-v1"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermaster-smartstore"}, + {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermaster-app-list"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermaster-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-master"}, } @@ -55,10 +57,10 @@ func TestApplyClusterMaster(t *testing.T) { } listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[5], funcCalls[8]}, "List": {listmockCall[0]}, "Update": {funcCalls[0]}} - updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8]}, "Update": {funcCalls[8]}, "List": {listmockCall[0]}} + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[5], funcCalls[9]}, "List": {listmockCall[0]}, "Update": {funcCalls[0]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8], funcCalls[9]}, "Update": {funcCalls[9]}, "List": {listmockCall[0]}} - current := enterprisev1.ClusterMaster{ + current := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -66,8 +68,8 @@ func TestApplyClusterMaster(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, }, @@ -75,7 +77,7 @@ func TestApplyClusterMaster(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyClusterMaster(c, cr.(*enterprisev1.ClusterMaster)) + _, err := ApplyClusterMaster(c, cr.(*enterpriseApi.ClusterMaster)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyClusterMaster", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -85,14 +87,14 @@ func TestApplyClusterMaster(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyClusterMaster(c, cr.(*enterprisev1.ClusterMaster)) + _, err := ApplyClusterMaster(c, cr.(*enterpriseApi.ClusterMaster)) return true, err } splunkDeletionTester(t, revised, deleteFunc) } func TestGetClusterMasterStatefulSet(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -162,6 +164,7 @@ func TestApplyClusterMasterWithSmartstore(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: 
"*v1.Secret-test-splunk-stack1-cluster-master-secret-v1"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermaster-smartstore"}, + {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermaster-app-list"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-clustermaster-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-cluster-master"}, {MetaName: "*v1.Pod-test-splunk-stack1-cluster-master-0"}, @@ -184,10 +187,10 @@ func TestApplyClusterMasterWithSmartstore(t *testing.T) { } listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[6], funcCalls[7], funcCalls[9], funcCalls[15], funcCalls[16], funcCalls[17], funcCalls[18], funcCalls[20]}, "List": {listmockCall[0], listmockCall[0], listmockCall[0]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[20]}} - updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2], funcCalls[3], funcCalls[5], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[11], funcCalls[11], funcCalls[12]}, "Update": {funcCalls[10], funcCalls[12]}, "List": {listmockCall[0]}} + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[6], funcCalls[7], funcCalls[9], funcCalls[16], funcCalls[17], funcCalls[18], funcCalls[19], funcCalls[21]}, "List": {listmockCall[0], listmockCall[0], listmockCall[0]}, "Update": {funcCalls[0], funcCalls[3], funcCalls[21]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2], funcCalls[3], funcCalls[5], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[11], funcCalls[12], funcCalls[13]}, "Update": {funcCalls[10], funcCalls[13]}, "List": {listmockCall[0]}} - current := enterprisev1.ClusterMaster{ + current := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -195,28 +198,28 @@ func TestApplyClusterMasterWithSmartstore(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, }, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, }, @@ -253,7 +256,7 @@ func TestApplyClusterMasterWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyClusterMaster(c, 
cr.(*enterprisev1.ClusterMaster)) + _, err := ApplyClusterMaster(c, cr.(*enterpriseApi.ClusterMaster)) return err } @@ -313,7 +316,7 @@ func TestApplyClusterMasterWithSmartstore(t *testing.T) { func TestPerformCmBundlePush(t *testing.T) { - current := enterprisev1.ClusterMaster{ + current := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -321,8 +324,8 @@ func TestPerformCmBundlePush(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, }, @@ -386,7 +389,7 @@ func TestPerformCmBundlePush(t *testing.T) { func TestPushMasterAppsBundle(t *testing.T) { - current := enterprisev1.ClusterMaster{ + current := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -394,8 +397,8 @@ func TestPushMasterAppsBundle(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, }, @@ -431,3 +434,390 @@ func TestPushMasterAppsBundle(t *testing.T) { t.Errorf("Bundle push should fail, when the password is not found") } } + +func TestAppFrameworkApplyClusterMasterShouldNotFail(t *testing.T) { + cm := enterpriseApi.ClusterMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + _, err = ApplyClusterMaster(client, &cm) + if err != nil { + t.Errorf("ApplyClusterMaster should not have returned error here.") + } +} + +func TestClusterMasterGetAppsListForAWSS3ClientShouldNotFail(t *testing.T) { + cm := enterpriseApi.ClusterMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + Defaults: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol2", + Scope: "local", + }, + VolList: []enterpriseApi.VolumeSpec{ + { + Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + { + Name: "msos_s2s3_vol2", + Endpoint: 
"https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london-2", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + { + Name: "authenticationApps", + Location: "authenticationAppsRepo", + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1", "5055a61b3d1b667a4c3279a381a2e7ae", "19779168370b97d8654424e6c9446dd8"} + Keys := []string{"admin_app.tgz", "security_app.tgz", "authentication_app.tgz"} + Sizes := []int64{10, 20, 30} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[1], + Key: &Keys[1], + LastModified: &randomTime, + Size: &Sizes[1], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[2], + Key: &Keys[2], + LastModified: &randomTime, + Size: &Sizes[2], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cm.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) 
+ + var vol enterpriseApi.VolumeSpec + var allSuccess bool = true + for index, appSource := range appFrameworkRef.AppSources { + + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + allSuccess = false + continue + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{ + client: client, + cr: &cm, + appFrameworkRef: &cm.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + cl.Objects = mockAwsObjects[index].Objects + return cl + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, + appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, + location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + // Get the mock client + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + s3Response, err := s3ClientMgr.GetAppsList() + if err != nil { + allSuccess = false + continue + } + + var mockResponse spltest.MockAWSS3Client + mockResponse, err = splclient.ConvertS3Response(s3Response) + if err != nil { + allSuccess = false + continue + } + + if mockAwsHandler.GotSourceAppListResponseMap == nil { + mockAwsHandler.GotSourceAppListResponseMap = make(map[string]spltest.MockAWSS3Client) + } + + mockAwsHandler.GotSourceAppListResponseMap[appSource.Name] = mockResponse + } + + if allSuccess == false { + t.Errorf("Unable to get apps list for all the app sources") + } + method := "GetAppsList" + mockAwsHandler.CheckAWSS3Response(t, method) +} + +func TestClusterMasterGetAppsListForAWSS3ClientShouldFail(t *testing.T) { + cm := enterpriseApi.ClusterMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1"} + Keys := []string{"admin_app.tgz"} + Sizes := []int64{10} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cm.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) 
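
The AWS S3 tests on either side of this point hinge on the same injection seam: the manager's `initFn` client factory and `getS3Client` wrapper are swapped for mocks, and the failure-path test simply makes the factory return nil. A minimal, self-contained sketch of that seam (`initFunc` and `clientManager` are hypothetical stand-ins, not the operator's spltest/splclient types):

```go
package main

import (
	"errors"
	"fmt"
)

// initFunc mirrors the shape of the manager's client factory: it may
// return nil when the remote storage client cannot be constructed.
type initFunc func(region, accessKeyID, secretAccessKey string) interface{}

type clientManager struct{ initFn initFunc }

func (m *clientManager) getAppsList() error {
	if c := m.initFn("us-east-1", "key", "secret"); c == nil {
		return errors.New("failed to initialize remote storage client")
	}
	return nil
}

func main() {
	mgr := &clientManager{initFn: func(region, accessKeyID, secretAccessKey string) interface{} {
		return nil // force the failure branch, as the test below does
	}}
	fmt.Println(mgr.getAppsList()) // failed to initialize remote storage client
}
```
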
+ + var vol enterpriseApi.VolumeSpec + + appSource := appFrameworkRef.AppSources[0] + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + t.Errorf("Unable to get Volume due to error=%s", err) + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{ + client: client, + cr: &cm, + appFrameworkRef: &cm.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + // Purposefully return nil here so that we test the error scenario + return nil + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, + appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, + location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + // Get the mock client + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as there is no S3 secret provided") + } + + // Create empty S3 secret + s3Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3-secret", + Namespace: "test", + }, + Data: map[string][]byte{}, + } + + client.AddObject(&s3Secret) + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty keys") + } + + s3AccessKey := []byte{'1'} + s3Secret.Data = map[string][]byte{"s3_access_key": s3AccessKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_secret_key") + } + + s3SecretKey := []byte{'2'} + s3Secret.Data = map[string][]byte{"s3_secret_key": s3SecretKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_access_key") + } + + // Create S3 secret + s3Secret = spltest.GetMockS3SecretKeys("s3-secret") + + // This should return an error as we have initialized initFn for s3ClientMgr + // to return a nil client. 
+	_, err = s3ClientMgr.GetAppsList()
+	if err == nil {
+		t.Errorf("GetAppsList should have returned error as we could not get the S3 client")
+	}
+
+	s3ClientMgr.initFn = func(region, accessKeyID, secretAccessKey string) interface{} {
+		// To test the error scenario, do not set the Objects member yet
+		cl := spltest.MockAWSS3Client{}
+		return cl
+	}
+
+	_, err = s3ClientMgr.GetAppsList()
+	if err == nil {
+		t.Errorf("GetAppsList should have returned error as we have empty objects in MockAWSS3Client")
+	}
+}
diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go
index bb831b820..a8da20539 100644
--- a/pkg/splunk/enterprise/configuration.go
+++ b/pkg/splunk/enterprise/configuration.go
@@ -17,6 +17,8 @@ package enterprise
 import (
 	"context"
 	"fmt"
+	"sort"
+	"strings"
 
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -25,7 +27,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 
-	enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1"
+	enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2"
+	splclient "github.com/splunk/splunk-operator/pkg/splunk/client"
 	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
 	splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller"
 	splutil "github.com/splunk/splunk-operator/pkg/splunk/util"
@@ -48,7 +51,7 @@ func getSplunkLabels(instanceIdentifier string, instanceType InstanceType, partO
 }
 
 // getSplunkVolumeClaims returns a standard collection of Kubernetes volume claims.
-func getSplunkVolumeClaims(cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, labels map[string]string, volumeType string) (corev1.PersistentVolumeClaim, error) {
+func getSplunkVolumeClaims(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, labels map[string]string, volumeType string) (corev1.PersistentVolumeClaim, error) {
 	var storageCapacity resource.Quantity
 	var err error
 
@@ -98,7 +101,7 @@
 }
 
 // getSplunkService returns a Kubernetes Service object for Splunk instances configured for a Splunk Enterprise resource.
-func getSplunkService(cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, instanceType InstanceType, isHeadless bool) *corev1.Service {
+func getSplunkService(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, isHeadless bool) *corev1.Service {
 
 	// use template if not headless
 	var service *corev1.Service
@@ -166,7 +169,7 @@
 }
 
 // setVolumeDefaults set properties in Volumes to default values
-func setVolumeDefaults(spec *enterprisev1.CommonSplunkSpec) {
+func setVolumeDefaults(spec *enterpriseApi.CommonSplunkSpec) {
 
 	// work-around openapi validation error by ensuring it is not nil
 	if spec.Volumes == nil {
@@ -193,7 +196,7 @@
 }
 
 // validateCommonSplunkSpec checks validity and makes default updates to a CommonSplunkSpec, and returns error if something is wrong.
-func validateCommonSplunkSpec(spec *enterprisev1.CommonSplunkSpec) error {
+func validateCommonSplunkSpec(spec *enterpriseApi.CommonSplunkSpec) error {
 	// if not specified via spec or env, image defaults to splunk/splunk
 	spec.Spec.Image = GetSplunkImage(spec.Spec.Image)
 
@@ -208,6 +211,14 @@ func validateCommonSplunkSpec(spec *enterprisev1.CommonSplunkSpec) error {
 		},
 	}
 
+	if spec.LivenessInitialDelaySeconds < 0 {
+		return fmt.Errorf("Negative value (%d) is not allowed for Liveness probe initial delay", spec.LivenessInitialDelaySeconds)
+	}
+
+	if spec.ReadinessInitialDelaySeconds < 0 {
+		return fmt.Errorf("Negative value (%d) is not allowed for Readiness probe initial delay", spec.ReadinessInitialDelaySeconds)
+	}
+
 	setVolumeDefaults(spec)
 
 	return splcommon.ValidateSpec(&spec.Spec, defaultResources)
@@ -226,19 +237,6 @@
 	}
 }
 
-// prepareSplunkSmartstoreConfigMap returns a K8 ConfigMap containing Splunk smartstore config in INI format
-func prepareSplunkSmartstoreConfigMap(identifier, namespace string, crKind string, dataIniMap map[string]string) *corev1.ConfigMap {
-	configMapIni := &corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      GetSplunkSmartstoreConfigMapName(identifier, crKind),
-			Namespace: namespace,
-		},
-	}
-	configMapIni.Data = dataIniMap
-
-	return configMapIni
-}
-
 // getSplunkPorts returns a map of ports to use for Splunk instances.
 func getSplunkPorts(instanceType InstanceType) map[string]int {
 	result := map[string]int{
@@ -305,7 +303,7 @@ func addSplunkVolumeToTemplate(podTemplateSpec *corev1.PodTemplateSpec, name str
 }
 
 // addPVCVolumes adds pvc volumes to statefulSet
-func addPVCVolumes(cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, statefulSet *appsv1.StatefulSet, labels map[string]string, volumeType string) error {
+func addPVCVolumes(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, statefulSet *appsv1.StatefulSet, labels map[string]string, volumeType string) error {
 	// prepare and append persistent volume claims if storage is not ephemeral
 	var err error
 	volumeClaimTemplate, err := getSplunkVolumeClaims(cr, spec, labels, volumeType)
@@ -346,7 +344,7 @@ func addEphermalVolumes(statefulSet *appsv1.StatefulSet, volumeType string) erro
 }
 
 // addStorageVolumes adds storage volumes to the StatefulSet
-func addStorageVolumes(cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, statefulSet *appsv1.StatefulSet, labels map[string]string) error {
+func addStorageVolumes(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, statefulSet *appsv1.StatefulSet, labels map[string]string) error {
 	// configure storage for mount path /opt/splunk/etc
 	if spec.EtcVolumeStorageConfig.EphemeralStorage {
 		// add Ephermal volumes
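
The two new checks above reject negative probe delays at spec-validation time, so the probe builders added later in this diff only ever see non-negative values. A small sketch of the rule under a hypothetical helper name (not the operator's API):

```go
package main

import "fmt"

// validateDelay mirrors the added spec checks: negative values are
// rejected up front, before any probe is constructed.
func validateDelay(name string, delay int32) error {
	if delay < 0 {
		return fmt.Errorf("negative value (%d) is not allowed for %s probe initial delay", delay, name)
	}
	return nil
}

func main() {
	fmt.Println(validateDelay("liveness", -5)) // error
	fmt.Println(validateDelay("readiness", 0)) // <nil>
}
```
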
@@ -375,7 +373,7 @@
 }
 
 // getSplunkStatefulSet returns a Kubernetes StatefulSet object for Splunk instances configured for a Splunk Enterprise resource.
-func getSplunkStatefulSet(client splcommon.ControllerClient, cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, instanceType InstanceType, replicas int32, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) {
+func getSplunkStatefulSet(client splcommon.ControllerClient, cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, replicas int32, extraEnv []corev1.EnvVar) (*appsv1.StatefulSet, error) {
 
 	// prepare misc values
 	ports := splcommon.SortContainerPorts(getSplunkContainerPorts(instanceType)) // note that port order is important for tests
@@ -465,30 +463,35 @@
 	return statefulSet, nil
 }
 
-// getSmartstoreConfigMap returns the smartstore configMap, if it exists and applicable for that instanceType
-func getSmartstoreConfigMap(client splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) (*corev1.ConfigMap, bool) {
-	var smartStoreConfigMapName string
-	if instanceType == SplunkStandalone || instanceType == SplunkClusterMaster {
-		smartStoreConfigMapName = GetSplunkSmartstoreConfigMapName(cr.GetName(), cr.GetObjectKind().GroupVersionKind().Kind)
+// getAppListingConfigMap returns the App listing configMap, if it exists and is applicable for that instanceType
+func getAppListingConfigMap(client splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) *corev1.ConfigMap {
+	var configMap *corev1.ConfigMap
+
+	// ToDo: Exclude MC, once its own CR is available
+	if instanceType != SplunkIndexer && instanceType != SplunkSearchHead && instanceType != SplunkMonitoringConsole {
+		appsConfigMapName := GetSplunkAppsConfigMapName(cr.GetName(), cr.GetObjectKind().GroupVersionKind().Kind)
+		namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: appsConfigMapName}
+		configMap, _ = splctrl.GetConfigMap(client, namespacedName)
 	}
 
-	if smartStoreConfigMapName != "" {
-		namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: smartStoreConfigMapName}
-		configMap, err := splctrl.GetConfigMap(client, namespacedName)
-		if err != nil {
-			// Do not return configMap name, unless the configMap really exists
-			return nil, false
-		}
+	return configMap
+}
 
-		return configMap, true
+// getSmartstoreConfigMap returns the smartstore configMap, if it exists and is applicable for that instanceType
+func getSmartstoreConfigMap(client splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType) *corev1.ConfigMap {
+	var configMap *corev1.ConfigMap
+
+	if instanceType == SplunkStandalone || instanceType == SplunkClusterMaster {
+		smartStoreConfigMapName := GetSplunkSmartstoreConfigMapName(cr.GetName(), cr.GetObjectKind().GroupVersionKind().Kind)
+		namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: smartStoreConfigMapName}
+		configMap, _ = splctrl.GetConfigMap(client, namespacedName)
 	}
 
-	// Do not return configMap name, unless the configMap really exists
-	return nil, false
+	return configMap
 }
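
Both lookups now return a possibly-nil *corev1.ConfigMap instead of a (value, bool) pair, so every caller collapses to a single nil check. A condensed sketch of the pattern, with hypothetical stand-in types in place of the Kubernetes client:

```go
package main

import "fmt"

// configMap is a stand-in for corev1.ConfigMap in this sketch.
type configMap struct{ Name string }

// getter abstracts a lookup like splctrl.GetConfigMap: it returns an
// error when the object does not exist.
type getter func(name string) (*configMap, error)

// getOptionalConfigMap mirrors the refactor above: the error is dropped
// and absence is signalled by a nil pointer.
func getOptionalConfigMap(get getter, name string) *configMap {
	cm, _ := get(name)
	return cm
}

func main() {
	notFound := func(name string) (*configMap, error) { return nil, fmt.Errorf("%s not found", name) }
	if getOptionalConfigMap(notFound, "splunk-stack1-clustermaster-app-list") == nil {
		fmt.Println("no app listing config map; skip the volume mount")
	}
}
```
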
 
 // updateSplunkPodTemplateWithConfig modifies the podTemplateSpec object based on configuration of the Splunk Enterprise resource.
-func updateSplunkPodTemplateWithConfig(client splcommon.ControllerClient, podTemplateSpec *corev1.PodTemplateSpec, cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, instanceType InstanceType, extraEnv []corev1.EnvVar, secretToMount string) {
+func updateSplunkPodTemplateWithConfig(client splcommon.ControllerClient, podTemplateSpec *corev1.PodTemplateSpec, cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, extraEnv []corev1.EnvVar, secretToMount string) {
 	scopedLog := log.WithName("updateSplunkPodTemplateWithConfig").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
 
 	// Add custom ports to splunk containers
@@ -554,8 +557,8 @@
 		}
 	}
 
-	smartstoreConfigMap, exists := getSmartstoreConfigMap(client, cr, instanceType)
-	if exists {
+	smartstoreConfigMap := getSmartstoreConfigMap(client, cr, instanceType)
+	if smartstoreConfigMap != nil {
 		addSplunkVolumeToTemplate(podTemplateSpec, "mnt-splunk-operator", "/mnt/splunk-operator/local/", corev1.VolumeSource{
 			ConfigMap: &corev1.ConfigMapVolumeSource{
 				LocalObjectReference: corev1.LocalObjectReference{
@@ -577,7 +580,19 @@
 		if instanceType == SplunkStandalone {
 			podTemplateSpec.ObjectMeta.Annotations[smartStoreConfigRev] = smartstoreConfigMap.ResourceVersion
 		}
+	}
+
+	appListingConfigMap := getAppListingConfigMap(client, cr, instanceType)
+	if appListingConfigMap != nil {
+		appVolumeSource := getVolumeSourceMountFromConfigMapData(appListingConfigMap, &configMapVolDefaultMode)
+		addSplunkVolumeToTemplate(podTemplateSpec, "mnt-app-listing", appConfLocationOnPod, appVolumeSource)
 
+		// ToDo: for Phase-2, to install the new apps, always reset the pod (need to change the behavior for Phase-3).
+		// Once the apps are installed, and a reconcile entry is triggered by polling interval expiry, if there are no new
+		// App changes on the remote store, then the config map data is erased. In that case, there is no need to reset the Pod
+		if len(appListingConfigMap.Data) > 0 {
+			podTemplateSpec.ObjectMeta.Annotations[appListingRev] = appListingConfigMap.ResourceVersion
+		}
 	}
 
 	// update security context
@@ -588,36 +603,23 @@
 		FSGroup:   &fsGroup,
 	}
 
-	// use script provided by enterprise container to check if pod is alive
-	livenessProbe := &corev1.Probe{
-		Handler: corev1.Handler{
-			Exec: &corev1.ExecAction{
-				Command: []string{
-					"/sbin/checkstate.sh",
-				},
-			},
-		},
-		InitialDelaySeconds: 300,
-		TimeoutSeconds:      30,
-		PeriodSeconds:       30,
+	var numberOfApps int
+	var appListingFiles []string
+	if appListingConfigMap != nil {
+		for key, appListingentry := range appListingConfigMap.Data {
+			if key != appsUpdateToken {
+				// One less than the number of entries, to account for the header
+				numberOfApps += strings.Count(appListingentry, "\n") - 1
+				appListingFiles = append(appListingFiles, key)
+			}
+		}
+		// Always sort the slice, so that map entries are ordered, to avoid pod resets
+		sort.Strings(appListingFiles)
 	}
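
The counting loop above assumes each app-listing entry carries one header line plus one line per app, so the app count is the entry's newline count minus one; the keys are then sorted so the derived ordering is stable across reconciles. A runnable sketch with hypothetical entries (the key names and YAML shape are illustrative, not the operator's actual format):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Hypothetical app-listing entries: one header line plus one line per app.
	data := map[string]string{
		"app-list-local.yaml":   "splunk:\n  - admin_app.tgz\n  - security_app.tgz\n",
		"app-list-cluster.yaml": "splunk:\n  - cluster_app.tgz\n",
	}

	var numberOfApps int
	var files []string
	for key, entry := range data {
		// One less than the number of lines, to account for the header.
		numberOfApps += strings.Count(entry, "\n") - 1
		files = append(files, key)
	}
	// Map traversal order is random; sorting keeps the output deterministic.
	sort.Strings(files)

	fmt.Println(numberOfApps, files) // 3 [app-list-cluster.yaml app-list-local.yaml]
}
```
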
-	readinessProbe := &corev1.Probe{
-		Handler: corev1.Handler{
-			Exec: &corev1.ExecAction{
-				Command: []string{
-					"/bin/grep",
-					"started",
-					"/opt/container_artifact/splunk-container.state",
-				},
-			},
-		},
-		InitialDelaySeconds: 10,
-		TimeoutSeconds:      5,
-		PeriodSeconds:       5,
-	}
+	livenessProbe := getLivenessProbe(cr, spec, int32(numberOfApps*avgAppInstallationTime))
+
+	readinessProbe := getReadinessProbe(cr, spec, int32(numberOfApps*avgAppInstallationTime))
 
 	// prepare defaults variable
 	splunkDefaults := "/mnt/splunk-secrets/default.yml"
@@ -636,6 +638,12 @@
 		splunkDefaults = fmt.Sprintf("%s,%s", "/mnt/splunk-defaults/default.yml", splunkDefaults)
 	}
 
+	if appListingConfigMap != nil {
+		for _, fileName := range appListingFiles {
+			splunkDefaults = fmt.Sprintf("%s%s,%s", appConfLocationOnPod, fileName, splunkDefaults)
+		}
+	}
+
 	// prepare container env variables
 	role := instanceType.ToRole()
 	if instanceType == SplunkStandalone && len(spec.ClusterMasterRef.Name) > 0 {
@@ -683,7 +691,7 @@
 			Namespace: cr.GetNamespace(),
 			Name:      spec.ClusterMasterRef.Name,
 		}
-		masterIdxCluster := &enterprisev1.ClusterMaster{}
+		masterIdxCluster := &enterpriseApi.ClusterMaster{}
 		err := client.Get(context.TODO(), namespacedName, masterIdxCluster)
 		if err != nil {
 			scopedLog.Error(err, "Unable to get ClusterMaster")
@@ -728,27 +736,92 @@
 	}
 }
 
-// isSmartstoreEnabled checks and returns true if smartstore is configured
-func isSmartstoreConfigured(smartstore *enterprisev1.SmartStoreSpec) bool {
-	if smartstore == nil {
-		return false
+// getLivenessProbe returns the probe for checking the liveness of the Pod;
+// uses script provided by enterprise container to check if pod is alive
+func getLivenessProbe(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, additionalDelay int32) *corev1.Probe {
+	scopedLog := log.WithName("getLivenessProbe").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())
+
+	livenessDelay := livenessProbeDefaultDelaySec + additionalDelay
+	if spec.LivenessInitialDelaySeconds > livenessDelay {
+		livenessDelay = spec.LivenessInitialDelaySeconds
 	}
 
-	return smartstore.IndexList != nil || smartstore.VolList != nil || smartstore.Defaults.VolName != ""
+	scopedLog.Info("LivenessProbeInitialDelay", "configured", spec.LivenessInitialDelaySeconds, "additionalDelay", additionalDelay, "finalCalculatedValue", livenessDelay)
+
+	livenessCommand := []string{
+		"/sbin/checkstate.sh",
+	}
+
+	return getProbe(livenessCommand, livenessDelay, livenessProbeTimeoutSec, livenessProbePeriodSec)
 }
 
-func checkIfVolumeExists(volumeList []enterprisev1.VolumeSpec, volName string) (int, error) {
-	for i, volume := range volumeList {
-		if volume.Name == volName {
-			return i, nil
-		}
+
+// getReadinessProbe provides the probe for checking the readiness of the Pod.
+// pod is ready if container artifact file is created with contents of "started".
+// this indicates that all the ansible plays executed at startup have completed.
+func getReadinessProbe(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, additionalDelay int32) *corev1.Probe { + scopedLog := log.WithName("getReadinessProbe").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + + readinessDelay := readinessProbeDefaultDelaySec + additionalDelay + if spec.ReadinessInitialDelaySeconds > readinessDelay { + readinessDelay = spec.ReadinessInitialDelaySeconds } - return -1, fmt.Errorf("Volume: %s, doesn't exist", volName) + scopedLog.Info("ReadinessProbeInitialDelay", "configured", spec.ReadinessInitialDelaySeconds, "additionalDelay", additionalDelay, "finalCalculatedValue", readinessDelay) + + readinessCommand := []string{ + "/bin/grep", + "started", + "/opt/container_artifact/splunk-container.state", + } + + return getProbe(readinessCommand, readinessDelay, readinessProbeTimeoutSec, readinessProbePeriodSec) +} + +// getProbe returns the Probe for given values. +func getProbe(command []string, delay, timeout, period int32) *corev1.Probe { + return &corev1.Probe{ + Handler: corev1.Handler{ + Exec: &corev1.ExecAction{ + Command: command, + }, + }, + InitialDelaySeconds: delay, + TimeoutSeconds: timeout, + PeriodSeconds: period, + } +} + +// getVolumeSourceMountFromConfigMapData returns a volume source with the configMap Data entries +func getVolumeSourceMountFromConfigMapData(configMap *corev1.ConfigMap, mode *int32) corev1.VolumeSource { + volumeSource := corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.GetName(), + }, + DefaultMode: mode, + }, + } + + for key := range configMap.Data { + volumeSource.ConfigMap.Items = append(volumeSource.ConfigMap.Items, corev1.KeyToPath{Key: key, Path: key, Mode: mode}) + } + // Map traversal order is not guaranteed. Always sort the slice to avoid (random) pod resets due to the ordering + splcommon.SortSlice(volumeSource.ConfigMap.Items, splcommon.SortFieldKey) + + return volumeSource +} + +// isSmartstoreEnabled checks and returns true if smartstore is configured +func isSmartstoreConfigured(smartstore *enterpriseApi.SmartStoreSpec) bool { + if smartstore == nil { + return false + } + + return smartstore.IndexList != nil || smartstore.VolList != nil || smartstore.Defaults.VolName != "" } // AreRemoteVolumeKeysChanged discovers if the S3 keys changed -func AreRemoteVolumeKeysChanged(client splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType, smartstore *enterprisev1.SmartStoreSpec, ResourceRev map[string]string, retError *error) bool { +func AreRemoteVolumeKeysChanged(client splcommon.ControllerClient, cr splcommon.MetaObject, instanceType InstanceType, smartstore *enterpriseApi.SmartStoreSpec, ResourceRev map[string]string, retError *error) bool { // No need to proceed if the smartstore is not configured if isSmartstoreConfigured(smartstore) == false { return false @@ -782,89 +855,266 @@ func AreRemoteVolumeKeysChanged(client splcommon.ControllerClient, cr splcommon. 
 	return false
 }
 
-// ValidateSplunkSmartstoreSpec checks and validates the smartstore config
-func ValidateSplunkSmartstoreSpec(smartstore *enterprisev1.SmartStoreSpec) error {
-	var err error
+// initAppFrameWorkContext is used to initialize the App Framework context
+func initAppFrameWorkContext(appFrameworkConf *enterpriseApi.AppFrameworkSpec, appStatusContext *enterpriseApi.AppDeploymentContext) {
+	if appStatusContext.AppsSrcDeployStatus == nil {
+		appStatusContext.AppsSrcDeployStatus = make(map[string]enterpriseApi.AppSrcDeployInfo)
+	}
 
-	// Smartstore is an optional config (at least) for now
-	if !isSmartstoreConfigured(smartstore) {
+	for _, vol := range appFrameworkConf.VolList {
+		if _, ok := splclient.S3Clients[vol.Provider]; !ok {
+			splclient.RegisterS3Client(vol.Provider)
+		}
+	}
+}
+
+// getAppSrcScope returns the scope of a given appSource
+func getAppSrcScope(appFrameworkConf *enterpriseApi.AppFrameworkSpec, appSrcName string) string {
+	for _, appSrc := range appFrameworkConf.AppSources {
+		if appSrc.Name == appSrcName {
+			if appSrc.Scope != "" {
+				return appSrc.Scope
+			}
+
+			break
+		}
+	}
+
+	return appFrameworkConf.Defaults.Scope
+}
+
+// CheckIfAppSrcExistsInConfig returns whether the given appSource is available in the configuration
+func CheckIfAppSrcExistsInConfig(appFrameworkConf *enterpriseApi.AppFrameworkSpec, appSrcName string) bool {
+	for _, appSrc := range appFrameworkConf.AppSources {
+		if appSrc.Name == appSrcName {
+			return true
+		}
+	}
+	return false
+}
+
+// validateSplunkAppSources validates the App source config in the App Framework spec
+func validateSplunkAppSources(appFramework *enterpriseApi.AppFrameworkSpec, localScope bool) error {
+
+	duplicateAppSourceStorageChecker := make(map[string]bool)
+	duplicateAppSourceNameChecker := make(map[string]bool)
+	var vol string
+
+	// Make sure that all the App Sources are provided with the mandatory config values.
+	for i, appSrc := range appFramework.AppSources {
+		if appSrc.Name == "" {
+			return fmt.Errorf("App Source name is missing for AppSource at: %d", i)
+		}
+
+		if _, ok := duplicateAppSourceNameChecker[appSrc.Name]; ok {
+			return fmt.Errorf("Multiple app sources with the name %s are not allowed", appSrc.Name)
+		}
+		duplicateAppSourceNameChecker[appSrc.Name] = true
+
+		if appSrc.Location == "" {
+			return fmt.Errorf("App Source location is missing for AppSource: %s", appSrc.Name)
+		}
+
+		if appSrc.VolName != "" {
+			_, err := splclient.CheckIfVolumeExists(appFramework.VolList, appSrc.VolName)
+			if err != nil {
+				return fmt.Errorf("Invalid Volume Name for App Source: %s. %s", appSrc.Name, err)
+			}
+			vol = appSrc.VolName
+		} else {
+			if appFramework.Defaults.VolName == "" {
+				return fmt.Errorf("volumeName is missing for App Source: %s", appSrc.Name)
+			}
+			vol = appFramework.Defaults.VolName
+		}
+
+		if appSrc.Scope != "" {
+			if localScope && appSrc.Scope != "local" {
+				return fmt.Errorf("Invalid scope for App Source: %s. Only local scope is supported for this kind of CR", appSrc.Name)
+			}
+
+			if appSrc.Scope != "local" && appSrc.Scope != "cluster" {
+				return fmt.Errorf("Scope for App Source: %s should be either local or cluster", appSrc.Name)
+			}
+		} else {
+			if appFramework.Defaults.Scope == "" {
+				return fmt.Errorf("App Source scope is missing for: %s", appSrc.Name)
+			}
+		}
+
+		if _, ok := duplicateAppSourceStorageChecker[vol+appSrc.Location]; ok {
+			return fmt.Errorf("Duplicate App Source configured for Volume: %s, and Location: %s combination. Remove the duplicate entry and reapply the configuration", vol, appSrc.Location)
+		}
+		duplicateAppSourceStorageChecker[vol+appSrc.Location] = true
+
+	}
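
validateSplunkAppSources, above, detects duplicate app sources by keying a map on the concatenated volume name and location. The sketch below shows the same check; the separator in the key is a defensive suggestion, since the PR concatenates the two fields directly, which could in principle collide for adjacent name pairs (e.g. "a"+"bc" vs "ab"+"c"):

```go
package main

import "fmt"

type appSource struct{ VolName, Location string }

func main() {
	sources := []appSource{
		{"vol1", "adminAppsRepo"},
		{"vol1", "adminAppsRepo"}, // duplicate volume+location pair
	}

	seen := map[string]bool{}
	for _, s := range sources {
		// A separator guards against accidental collisions when the
		// two fields are joined into a single map key.
		key := s.VolName + "|" + s.Location
		if seen[key] {
			fmt.Printf("duplicate app source for volume %s and location %s\n", s.VolName, s.Location)
			return
		}
		seen[key] = true
	}
}
```
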
+
+	if localScope && appFramework.Defaults.Scope != "" && appFramework.Defaults.Scope != "local" {
+		return fmt.Errorf("Invalid scope for defaults config. Only local scope is supported for this kind of CR")
+	}
+
+	if appFramework.Defaults.Scope != "" && appFramework.Defaults.Scope != "local" && appFramework.Defaults.Scope != "cluster" {
+		return fmt.Errorf("Scope for defaults should be either local or cluster, but configured as: %s", appFramework.Defaults.Scope)
+	}
+
+	if appFramework.Defaults.VolName != "" {
+		_, err := splclient.CheckIfVolumeExists(appFramework.VolList, appFramework.Defaults.VolName)
+		if err != nil {
+			return fmt.Errorf("Invalid Volume Name for Defaults. Error: %s", err)
+		}
+	}
+
+	return nil
+}
+
+// isAppFrameworkConfigured checks and returns true if the App Framework is configured.
+// App Repo config without any App sources will not cause any App Framework activity
+func isAppFrameworkConfigured(appFramework *enterpriseApi.AppFrameworkSpec) bool {
+	return !(appFramework == nil || appFramework.AppSources == nil)
+}
+
+// ValidateAppFrameworkSpec checks and validates the App Framework config
+func ValidateAppFrameworkSpec(appFramework *enterpriseApi.AppFrameworkSpec, appContext *enterpriseApi.AppDeploymentContext, localScope bool) error {
+	var err error
+	if !isAppFrameworkConfigured(appFramework) {
 		return nil
 	}
 
-	numVolumes := len(smartstore.VolList)
-	numIndexes := len(smartstore.IndexList)
-	if numIndexes > 0 && numVolumes == 0 {
-		return fmt.Errorf("Volume configuration is missing. Num. of indexes = %d. Num. of Volumes = %d", numIndexes, numVolumes)
+	scopedLog := log.WithName("ValidateAppFrameworkSpec")
+
+	scopedLog.Info("configCheck", "scope", localScope)
+
+	// Set the value in the status field to be the same as that in the spec.
+	appContext.AppsRepoStatusPollInterval = appFramework.AppsRepoPollInterval
+
+	if appContext.AppsRepoStatusPollInterval == 0 {
+		scopedLog.Error(err, "appsRepoPollIntervalSeconds is not configured", "Setting it to the default value(seconds)", splcommon.DefaultAppsRepoPollInterval)
+		appContext.AppsRepoStatusPollInterval = splcommon.DefaultAppsRepoPollInterval
+	} else if appFramework.AppsRepoPollInterval < splcommon.MinAppsRepoPollInterval {
+		scopedLog.Error(err, "configured appsRepoPollIntervalSeconds is too small", "configured value", appFramework.AppsRepoPollInterval, "Setting it to the default min. value(seconds)", splcommon.MinAppsRepoPollInterval)
+		appContext.AppsRepoStatusPollInterval = splcommon.MinAppsRepoPollInterval
+	} else if appFramework.AppsRepoPollInterval > splcommon.MaxAppsRepoPollInterval {
+		scopedLog.Error(err, "configured appsRepoPollIntervalSeconds is too large", "configured value", appFramework.AppsRepoPollInterval, "Setting it to the default max. value(seconds)", splcommon.MaxAppsRepoPollInterval)
+		appContext.AppsRepoStatusPollInterval = splcommon.MaxAppsRepoPollInterval
 	}
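
The defaulting chain above pins the poll interval between the minimum and maximum bounds, falling back to the default when the field is unset. A condensed sketch, assuming the documented values of 1 hour (default), 1 minute (min), and 1 day (max); the constant names here are illustrative, not the splcommon identifiers:

```go
package main

import "fmt"

const (
	defaultPollInterval int64 = 3600  // 1 hour
	minPollInterval     int64 = 60    // 1 minute
	maxPollInterval     int64 = 86400 // 1 day
)

// clampPollInterval mirrors the defaulting rules: unset values fall back
// to the default, and out-of-range values are pinned to the bounds.
func clampPollInterval(v int64) int64 {
	switch {
	case v == 0:
		return defaultPollInterval
	case v < minPollInterval:
		return minPollInterval
	case v > maxPollInterval:
		return maxPollInterval
	default:
		return v
	}
}

func main() {
	fmt.Println(clampPollInterval(0), clampPollInterval(5), clampPollInterval(90000)) // 3600 60 86400
}
```
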
 
+	err = validateRemoteVolumeSpec(appFramework.VolList, true)
+	if err != nil {
+		return err
+	}
+
+	err = validateSplunkAppSources(appFramework, localScope)
+
+	if err == nil {
+		scopedLog.Info("App framework configuration is valid")
+	}
+	return err
+}
+
+// validateRemoteVolumeSpec validates the Remote storage volume spec
+func validateRemoteVolumeSpec(volList []enterpriseApi.VolumeSpec, isAppFramework bool) error {
+
 	duplicateChecker := make(map[string]bool)
 
-	volList := smartstore.VolList
 	// Make sure that all the Volumes are provided with the mandatory config values.
 	for i, volume := range volList {
 		if _, ok := duplicateChecker[volume.Name]; ok {
 			return fmt.Errorf("Duplicate volume name detected: %s. Remove the duplicate entry and reapply the configuration", volume.Name)
 		}
 		duplicateChecker[volume.Name] = true
-		// Make sure that the smartstore volume info is correct
 		if volume.Name == "" {
 			return fmt.Errorf("Volume name is missing for volume at : %d", i)
 		}
-
 		if volume.Endpoint == "" {
 			return fmt.Errorf("Volume Endpoint URI is missing")
 		}
-
 		if volume.Path == "" {
 			return fmt.Errorf("Volume Path is missing")
 		}
-
 		if volume.SecretRef == "" {
 			return fmt.Errorf("Volume SecretRef is missing")
 		}
-	}
 
-	defaults := smartstore.Defaults
-	// When volName is configured, bucket remote path should also be configured
-	if defaults.VolName != "" {
-		_, err = checkIfVolumeExists(volList, defaults.VolName)
-		if err != nil {
-			return fmt.Errorf("Invalid configuration for defaults volume. %s", err)
+		// provider is used in App framework to pick the S3 client(aws, minio), and is not applicable to Smartstore.
+		// For now, Smartstore supports only S3, which is the default.
+		if isAppFramework {
+			if volume.Type == "" {
+				return fmt.Errorf("Remote volume Type is missing")
+			}
+			if volume.Provider == "" {
+				return fmt.Errorf("S3 Provider is missing")
+			}
 		}
 	}
+	return nil
+}
 
-	duplicateChecker = make(map[string]bool)
-	indexList := smartstore.IndexList
-	// Make sure that all the indexes are provided with the mandatory config values.
-	for i, index := range indexList {
-		if _, ok := duplicateChecker[index.Name]; ok {
-			return fmt.Errorf("Duplicate index name detected: %s.Remove the duplicate entry and reapply the configuration", index.Name)
-		}
-		duplicateChecker[index.Name] = true
+// validateSplunkIndexesSpec validates the smartstore index spec
+func validateSplunkIndexesSpec(smartstore *enterpriseApi.SmartStoreSpec) error {
+	duplicateChecker := make(map[string]bool)
+
+	// Make sure that all the indexes are provided with the mandatory config values.
+	for i, index := range smartstore.IndexList {
 		if index.Name == "" {
 			return fmt.Errorf("Index name is missing for index at: %d", i)
 		}
+		if _, ok := duplicateChecker[index.Name]; ok {
+			return fmt.Errorf("Duplicate index name detected: %s. Remove the duplicate entry and reapply the configuration", index.Name)
+		}
+		duplicateChecker[index.Name] = true
-		if index.VolName == "" && defaults.VolName == "" {
+		if index.VolName == "" && smartstore.Defaults.VolName == "" {
 			return fmt.Errorf("volumeName is missing for index: %s", index.Name)
 		}
 
 		if index.VolName != "" {
-			_, err = checkIfVolumeExists(volList, index.VolName)
+			_, err := splclient.CheckIfVolumeExists(smartstore.VolList, index.VolName)
 			if err != nil {
 				return fmt.Errorf("Invalid configuration for index: %s.
%s", index.Name, err) } } - } return nil } +// ValidateSplunkSmartstoreSpec checks and validates the smartstore config +func ValidateSplunkSmartstoreSpec(smartstore *enterpriseApi.SmartStoreSpec) error { + var err error + + // Smartstore is an optional config (at least) for now + if !isSmartstoreConfigured(smartstore) { + return nil + } + + numVolumes := len(smartstore.VolList) + numIndexes := len(smartstore.IndexList) + if numIndexes > 0 && numVolumes == 0 { + return fmt.Errorf("Volume configuration is missing. Num. of indexes = %d. Num. of Volumes = %d", numIndexes, numVolumes) + } + + err = validateRemoteVolumeSpec(smartstore.VolList, false) + if err != nil { + return err + } + + defaults := smartstore.Defaults + // When volName is configured, bucket remote path should also be configured + if defaults.VolName != "" { + _, err = splclient.CheckIfVolumeExists(smartstore.VolList, defaults.VolName) + if err != nil { + return fmt.Errorf("Invalid configuration for defaults volume. %s", err) + } + } + + err = validateSplunkIndexesSpec(smartstore) + return err +} + // GetSmartstoreVolumesConfig returns the list of Volumes configuration in INI format -func GetSmartstoreVolumesConfig(client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterprisev1.SmartStoreSpec, mapData map[string]string) (string, error) { +func GetSmartstoreVolumesConfig(client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec, mapData map[string]string) (string, error) { var volumesConf string volumes := smartstore.VolList @@ -888,7 +1138,7 @@ remote.s3.endpoint = %s } // GetSmartstoreIndexesConfig returns the list of indexes configuration in INI format -func GetSmartstoreIndexesConfig(indexes []enterprisev1.IndexSpec) string { +func GetSmartstoreIndexesConfig(indexes []enterpriseApi.IndexSpec) string { var indexesConf string @@ -937,7 +1187,7 @@ maxGlobalRawDataSizeMB = %d`, indexesConf, indexes[i].MaxGlobalRawDataSizeMB) } //GetServerConfigEntries prepares the server.conf entries, and returns as a string -func GetServerConfigEntries(cacheManagerConf *enterprisev1.CacheManagerSpec) string { +func GetServerConfigEntries(cacheManagerConf *enterpriseApi.CacheManagerSpec) string { if cacheManagerConf == nil { return "" } @@ -993,7 +1243,7 @@ max_concurrent_uploads = %d`, serverConfIni, cacheManagerConf.MaxConcurrentUploa } // GetSmartstoreIndexesDefaults fills the indexes.conf default stanza in INI format -func GetSmartstoreIndexesDefaults(defaults enterprisev1.IndexConfDefaultsSpec) string { +func GetSmartstoreIndexesDefaults(defaults enterpriseApi.IndexConfDefaultsSpec) string { remotePath := "$_index_name" diff --git a/pkg/splunk/enterprise/configuration_test.go b/pkg/splunk/enterprise/configuration_test.go index 2678404b8..5c7cba652 100644 --- a/pkg/splunk/enterprise/configuration_test.go +++ b/pkg/splunk/enterprise/configuration_test.go @@ -19,7 +19,7 @@ import ( "fmt" "testing" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -51,7 +51,7 @@ func marshalAndCompare(t *testing.T, compare interface{}, method string, want st } func TestGetSplunkService(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: 
"stack1", Namespace: "test", @@ -82,13 +82,13 @@ func TestGetSplunkService(t *testing.T) { } func TestGetSplunkDefaults(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.IndexerClusterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{Defaults: "defaults_string"}, + Spec: enterpriseApi.IndexerClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{Defaults: "defaults_string"}, }, } @@ -103,14 +103,14 @@ func TestGetSplunkDefaults(t *testing.T) { } func TestGetService(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.IndexerClusterSpec{ + Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 3, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: splcommon.Spec{ ServiceTemplate: corev1.Service{ Spec: corev1.ServiceSpec{ @@ -133,7 +133,7 @@ func TestGetService(t *testing.T) { } func TestSetVolumeDefault(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -200,18 +200,18 @@ func TestSetVolumeDefault(t *testing.T) { } func TestSmartstoreApplyClusterMasterFailsOnInvalidSmartStoreConfig(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "idxCluster", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "", Path: "testbucket-rs-london"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1"}, {Name: "salesdata2", RemotePath: "salesdata2"}, {Name: "salesdata3", RemotePath: ""}, @@ -229,20 +229,20 @@ func TestSmartstoreApplyClusterMasterFailsOnInvalidSmartStoreConfig(t *testing.T } func TestSmartstoreApplyStandaloneFailsOnInvalidSmartStoreConfig(t *testing.T) { - cr := enterprisev1.Standalone{ + cr := enterpriseApi.Standalone{ ObjectMeta: metav1.ObjectMeta{ Name: "standalone", Namespace: "test", }, - Spec: enterprisev1.StandaloneSpec{ + Spec: enterpriseApi.StandaloneSpec{ Replicas: 1, - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "", Path: "testbucket-rs-london"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "salesdata2"}, @@ -261,25 +261,25 @@ func TestSmartstoreApplyStandaloneFailsOnInvalidSmartStoreConfig(t *testing.T) { } func TestSmartStoreConfigDoesNotFailOnClusterMasterCR(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "CM", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: 
enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ - {Name: "salesdata1", RemotePath: "remotepath1", IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexList: []enterpriseApi.IndexSpec{ + {Name: "salesdata1", RemotePath: "remotepath1", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, - {Name: "salesdata2", RemotePath: "remotepath2", IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + {Name: "salesdata2", RemotePath: "remotepath2", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, - {Name: "salesdata3", RemotePath: "remotepath3", IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + {Name: "salesdata3", RemotePath: "remotepath3", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -298,21 +298,21 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { var err error // Valid smartstore config - SmartStore := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStore := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -324,22 +324,22 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Missing Secret object reference with Volume config should fail - SmartStoreMultipleVolumes := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreMultipleVolumes := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol_1", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london"}, {Name: "msos_s2s3_vol_2", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret2"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -351,8 +351,8 @@ func 
TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Smartstore config with missing endpoint for the volume errors out - SmartStoreVolumeWithNoRemoteEndPoint := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreVolumeWithNoRemoteEndPoint := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "", Path: "testbucket-rs-london"}, }, } @@ -363,8 +363,8 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Smartstore config with missing remote name for the volume - SmartStoreWithVolumeNameMissing := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreWithVolumeNameMissing := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london"}, }, } @@ -375,8 +375,8 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Smartstore config with missing path for the volume - SmartStoreWithVolumePathMissing := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreWithVolumePathMissing := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: ""}, }, } @@ -387,21 +387,21 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Smartstore config with missing index name - SmartStoreWithMissingIndexName := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreWithMissingIndexName := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -413,21 +413,21 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } //Smartstore config Index with VolName, but missing RemotePath errors out - SmartStoreWithMissingIndexLocation := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreWithMissingIndexLocation := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: 
"msos_s2s3_vol"}, }, }, @@ -439,18 +439,18 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Having defaults volume and remote path should not complain an index missing the volume and remotepath info. - SmartStoreConfWithDefaults := enterprisev1.SmartStoreSpec{ - Defaults: enterprisev1.IndexConfDefaultsSpec{ - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + SmartStoreConfWithDefaults := enterpriseApi.SmartStoreSpec{ + Defaults: enterpriseApi.IndexConfDefaultsSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, - VolList: []enterprisev1.VolumeSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1"}, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3"}, @@ -469,18 +469,18 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Configuring indexes without volume config should return error - SmartStoreWithoutVolumes := enterprisev1.SmartStoreSpec{ - IndexList: []enterprisev1.IndexSpec{ + SmartStoreWithoutVolumes := enterpriseApi.SmartStoreSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -492,23 +492,23 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Duplicate volume names should be rejected - SmartStoreWithDuplicateVolumes := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreWithDuplicateVolumes := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol-1", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, {Name: "msos_s2s3_vol-2", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, {Name: "msos_s2s3_vol-1", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: 
"msos_s2s3_vol"}, }, }, @@ -520,12 +520,12 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } // Defaults with invalid volume reference should return error - SmartStoreDefaultsWithNonExistingVolume := enterprisev1.SmartStoreSpec{ - Defaults: enterprisev1.IndexConfDefaultsSpec{ - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + SmartStoreDefaultsWithNonExistingVolume := enterpriseApi.SmartStoreSpec{ + Defaults: enterpriseApi.IndexConfDefaultsSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol-2"}, }, - VolList: []enterprisev1.VolumeSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol-1", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, } @@ -536,17 +536,17 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { } //Duplicate index names should return an error - SmartStoreWithDuplicateIndexes := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStoreWithDuplicateIndexes := enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata1", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -559,14 +559,14 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) { // If the default volume is not configured, then each index should be configured // with an explicit volume info. 
If not, should return an error
-	SmartStoreVolumeMissingBothFromDefaultsAndIndex := enterprisev1.SmartStoreSpec{
-		VolList: []enterprisev1.VolumeSpec{
+	SmartStoreVolumeMissingBothFromDefaultsAndIndex := enterpriseApi.SmartStoreSpec{
+		VolList: []enterpriseApi.VolumeSpec{
 			{Name: "msos_s2s3_vol-1", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"},
 		},
-		IndexList: []enterprisev1.IndexSpec{
+		IndexList: []enterpriseApi.IndexSpec{
 			{Name: "salesdata1", RemotePath: "remotepath1"},
 			{Name: "salesdata2", RemotePath: "remotepath2",
-				IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{
+				IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{
 					VolName: "msos_s2s3_vol"},
 			},
 		},
@@ -578,13 +578,13 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) {
 	}
 
 	// Volume referenced from an index must be a valid volume
-	SmartStoreIndexesWithInvalidVolumeName := enterprisev1.SmartStoreSpec{
-		VolList: []enterprisev1.VolumeSpec{
+	SmartStoreIndexesWithInvalidVolumeName := enterpriseApi.SmartStoreSpec{
+		VolList: []enterpriseApi.VolumeSpec{
 			{Name: "msos_s2s3_vol-1", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"},
 		},
-		IndexList: []enterprisev1.IndexSpec{
+		IndexList: []enterpriseApi.IndexSpec{
 			{Name: "salesdata1", RemotePath: "remotepath1",
-				IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{
+				IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{
 					VolName: "msos_s2s3_vol-2"},
 			},
 		},
@@ -596,34 +596,286 @@ func TestValidateSplunkSmartstoreSpec(t *testing.T) {
 	}
 }
 
+func TestValidateAppFrameworkSpec(t *testing.T) {
+	var err error
+	// Valid app framework config
+	AppFramework := enterpriseApi.AppFrameworkSpec{
+		VolList: []enterpriseApi.VolumeSpec{
+			{Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret", Type: "s3", Provider: "aws"},
+		},
+		AppSources: []enterpriseApi.AppSourceSpec{
+			{Name: "adminApps",
+				Location: "adminAppsRepo",
+				AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{
+					VolName: "msos_s2s3_vol",
+					Scope:   "cluster"},
+			},
+			{Name: "securityApps",
+				Location: "securityAppsRepo",
+				AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{
+					VolName: "msos_s2s3_vol",
+					Scope:   "local"},
+			},
+			{Name: "authenticationApps",
+				Location: "authenticationAppsRepo",
+				AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{
+					VolName: "msos_s2s3_vol",
+					Scope:   "local"},
+			},
+		},
+	}
+
+	appFrameworkContext := enterpriseApi.AppDeploymentContext{
+		AppsRepoStatusPollInterval: 60,
+	}
+
+	err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false)
+	if err != nil {
+		t.Errorf("Valid App Framework configuration should not cause error: %v", err)
+	}
+
+	AppFramework.VolList[0].SecretRef = ""
+	err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false)
+	if err == nil {
+		t.Errorf("Missing Secret Object reference should error out")
+	}
+	AppFramework.VolList[0].SecretRef = "s3-secret"
+
+	// App Framework config with missing App Source name
+	AppFramework.AppSources[0].Name = ""
+
+	err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false)
+	if err == nil {
+		t.Errorf("Should not accept an app source with missing name")
+	}
+
+	// An App Framework app source config with missing location (without a default location) should error out
+	AppFramework.AppSources[0].Name = "adminApps"
+	AppFramework.AppSources[0].Location = ""
ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("An App Source with missing location should cause an error, when there is no default location configured") + } + AppFramework.AppSources[0].Location = "adminAppsRepo" + + // With default volume and scope configured, an App Source missing those settings should not cause a complaint. + AppFramework.Defaults.Scope = "cluster" + AppFramework.Defaults.VolName = "msos_s2s3_vol" + AppFramework.AppSources[0].Scope = "" + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err != nil { + t.Errorf("Should accept an App Source with missing scope, when default scope is configured. But, got the error: %v", err) + } + AppFramework.AppSources[0].Location = "adminAppsRepo" + + // Empty App Repo config should not cause an error + err = ValidateAppFrameworkSpec(nil, &appFrameworkContext, false) + if err != nil { + t.Errorf("App Repo config is optional, should not cause an error. But, got the error: %v", err) + } + + // Configuring app sources without volume config should return an error + AppFrameworkWithoutVolumeSpec := enterpriseApi.AppFrameworkSpec{ + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "cluster"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + } + + err = ValidateAppFrameworkSpec(&AppFrameworkWithoutVolumeSpec, &appFrameworkContext, false) + if err == nil { + t.Errorf("App Repo config without volume details should return error") + } + + // Defaults with invalid volume reference should return error + AppFramework.Defaults.VolName = "UnknownVolume" + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Volume referred in the defaults should be a valid volume") + } + + // Duplicate App Sources should return an error + tmpVolume := AppFramework.AppSources[1].VolName + tmpLocation := AppFramework.AppSources[1].Location + + AppFramework.AppSources[1].VolName = AppFramework.AppSources[0].VolName + AppFramework.AppSources[1].Location = AppFramework.AppSources[0].Location + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Duplicate app sources should return an error") + } + + AppFramework.AppSources[1].VolName = tmpVolume + AppFramework.AppSources[1].Location = tmpLocation + + // Duplicate app source names should cause an error + tmpAppSourceName := AppFramework.AppSources[1].Name + AppFramework.AppSources[1].Name = AppFramework.AppSources[0].Name + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Failed to detect duplicate app source names") + } + AppFramework.AppSources[1].Name = tmpAppSourceName + + // If the default volume is not configured, then each App Source should be configured + // with explicit volume info.
If not, an error should be returned + AppFramework.AppSources[0].VolName = "" + AppFramework.Defaults.VolName = "" + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("If no default volume, App Source with missing volume info should return an error") + } + + // If the AppSource doesn't have VolName, and if the defaults have it, shouldn't cause an error + AppFramework.Defaults.VolName = "msos_s2s3_vol" + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err != nil { + t.Errorf("If default volume, App Source with missing volume should not return an error, but got error: %v", err) + } + + // Volume referenced from an App Source must be a valid volume + AppFramework.AppSources[0].VolName = "UnknownVolume" + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("App Source with an invalid volume name should return an error") + } + AppFramework.AppSources[0].VolName = "msos_s2s3_vol" + + // If the CR supports only local apps, an App Source with a non-local scope should return an error + AppFramework.AppSources[0].Scope = "cluster" + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, true) + if err == nil { + t.Errorf("When called with App scope local, any app sources with the cluster scope should return an error") + } + + // An app scope value other than "local" or "cluster" should return an error + AppFramework.AppSources[0].Scope = "unknown" + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Unsupported app scope should cause an error, but it was not detected") + } + + // If the CR supports only local apps, and default is configured with "cluster" scope, that should be detected + AppFramework.AppSources[0].Scope = "local" + AppFramework.AppSources[1].Scope = "local" + AppFramework.AppSources[2].Scope = "local" + + AppFramework.Defaults.Scope = "cluster" + + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, true) + if err == nil { + t.Errorf("When called with App scope local, defaults with the cluster scope should return an error") + } + AppFramework.AppSources[0].Scope = "local" + + // Default scope should be either "local" OR "cluster" + AppFramework.Defaults.Scope = "unknown" + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Unsupported default scope should cause an error, but it was not detected") + } + AppFramework.Defaults.Scope = "cluster" + + // A missing scope should return an error if no default scope is specified + AppFramework.Defaults.Scope = "" + AppFramework.AppSources[0].Scope = "" + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Missing scope should be detected, but it was not") + } + AppFramework.Defaults.Scope = "local" + AppFramework.AppSources[0].Scope = "local" + + // AppsRepoPollInterval should be between minAppsRepoPollInterval and maxAppsRepoPollInterval + // Default Poll interval + if splcommon.DefaultAppsRepoPollInterval < splcommon.MinAppsRepoPollInterval || splcommon.DefaultAppsRepoPollInterval > splcommon.MaxAppsRepoPollInterval { + t.Errorf("defaultAppsRepoPollInterval should be within the range [%d - %d]", splcommon.MinAppsRepoPollInterval, splcommon.MaxAppsRepoPollInterval) + } + + appFrameworkContext.AppsRepoStatusPollInterval = 0 + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err != nil { +
t.Errorf("Got error on valid App Framework configuration. Error: %v", err) + } else if appFrameworkContext.AppsRepoStatusPollInterval != splcommon.DefaultAppsRepoPollInterval { + t.Errorf("Spec validation failed to set the Repo poll interval to the default value: %d", splcommon.DefaultAppsRepoPollInterval) + } + + // Check for minAppsRepoPollInterval + appFrameworkContext.AppsRepoStatusPollInterval = splcommon.MinAppsRepoPollInterval - 1 + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err != nil { + t.Errorf("Got error on valid App Framework configuration. Error: %v", err) + } else if appFrameworkContext.AppsRepoStatusPollInterval < splcommon.MinAppsRepoPollInterval { + t.Errorf("Spec validation is not able to set the the AppsRepoPollInterval to minAppsRepoPollInterval") + } + + // Check for maxAppsRepoPollInterval + appFrameworkContext.AppsRepoStatusPollInterval = splcommon.MaxAppsRepoPollInterval + 1 + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err != nil { + t.Errorf("Got error on valid App Framework configuration. Error: %v", err) + } else if appFrameworkContext.AppsRepoStatusPollInterval > splcommon.MaxAppsRepoPollInterval { + t.Errorf("Spec validation is not able to set the the AppsRepoPollInterval to maxAppsRepoPollInterval") + } + + // Invalid volume name in defaults should return an error + AppFramework.Defaults.VolName = "unknownVolume" + err = ValidateAppFrameworkSpec(&AppFramework, &appFrameworkContext, false) + if err == nil { + t.Errorf("Configuring Defaults with invalid volume name should return an error, but failed to detect") + } +} + func TestGetSmartstoreIndexesConfig(t *testing.T) { - SmartStoreIndexes := enterprisev1.SmartStoreSpec{ - IndexList: []enterprisev1.IndexSpec{ + SmartStoreIndexes := enterpriseApi.SmartStoreSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ MaxGlobalDataSizeMB: 6000, MaxGlobalRawDataSizeMB: 7000, VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", // Missing RemotePath should be filled with the default "$_index_name" - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ MaxGlobalDataSizeMB: 2000, MaxGlobalRawDataSizeMB: 3000, VolName: "msos_s2s3_vol"}, - IndexAndCacheManagerCommonSpec: enterprisev1.IndexAndCacheManagerCommonSpec{ + IndexAndCacheManagerCommonSpec: enterpriseApi.IndexAndCacheManagerCommonSpec{ HotlistBloomFilterRecencyHours: 48, HotlistRecencySecs: 48 * 60 * 60}, }, {Name: "salesdata4", // Missing RemotePath should be filled with the default "$_index_name" - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ MaxGlobalDataSizeMB: 4000, MaxGlobalRawDataSizeMB: 5000, VolName: "msos_s2s3_vol"}, - IndexAndCacheManagerCommonSpec: enterprisev1.IndexAndCacheManagerCommonSpec{ + IndexAndCacheManagerCommonSpec: enterpriseApi.IndexAndCacheManagerCommonSpec{ HotlistBloomFilterRecencyHours: 24, HotlistRecencySecs: 24 * 60 * 60}, }, @@ -661,8 +913,8 @@ maxGlobalRawDataSizeMB = 5000 } func TestGetServerConfigEntries(t *testing.T) 
{ - SmartStoreCacheManager := enterprisev1.CacheManagerSpec{ - IndexAndCacheManagerCommonSpec: enterprisev1.IndexAndCacheManagerCommonSpec{ + SmartStoreCacheManager := enterpriseApi.CacheManagerSpec{ + IndexAndCacheManagerCommonSpec: enterpriseApi.IndexAndCacheManagerCommonSpec{ HotlistRecencySecs: 24 * 60 * 60, HotlistBloomFilterRecencyHours: 24, }, @@ -700,8 +952,8 @@ max_concurrent_uploads = 6 func TestGetSmartstoreIndexesDefaults(t *testing.T) { - SmartStoreDefaultsConf := enterprisev1.IndexConfDefaultsSpec{ - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + SmartStoreDefaultsConf := enterpriseApi.IndexConfDefaultsSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "s2s3_vol", MaxGlobalDataSizeMB: 50 * 1024, MaxGlobalRawDataSizeMB: 60 * 1024, @@ -728,65 +980,26 @@ maxGlobalRawDataSizeMB = 61440 } -func TestCheckIfVolumeExists(t *testing.T) { - SmartStoreConfig := enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ - {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, - }, - IndexList: []enterprisev1.IndexSpec{ - {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ - VolName: "msos_s2s3_vol"}, - }, - {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ - VolName: "msos_s2s3_vol"}, - }, - {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ - VolName: "msos_s2s3_vol"}, - }, - }, - } - - // Volume that doesn't should error out - _, err := checkIfVolumeExists(SmartStoreConfig.VolList, "random_volume_name") - - if err == nil { - t.Errorf("if the volume doesn't exists, error should be reported") - } - - // Volume that exists should not error out - index := len(SmartStoreConfig.VolList) - 1 - returnedIndex, err := checkIfVolumeExists(SmartStoreConfig.VolList, SmartStoreConfig.VolList[index].Name) - - if err != nil { - t.Errorf("existing volume should not error out. 
index id: %d, error: %s", index, err.Error()) - } else if index != returnedIndex { - t.Errorf("Expected index: %d, but returned index id: %d", index, returnedIndex) - } -} - func TestAreRemoteVolumeKeysChanged(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "CM", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ - {Name: "salesdata1", RemotePath: "remotepath1", IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexList: []enterpriseApi.IndexSpec{ + {Name: "salesdata1", RemotePath: "remotepath1", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, - {Name: "salesdata2", RemotePath: "remotepath2", IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + {Name: "salesdata2", RemotePath: "remotepath2", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, - {Name: "salesdata3", RemotePath: "remotepath3", IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + {Name: "salesdata3", RemotePath: "remotepath3", IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -858,7 +1071,7 @@ func TestAddStorageVolumes(t *testing.T) { var replicas int32 = 1 // Create CR - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "CM", Namespace: "test", @@ -891,7 +1104,7 @@ func TestAddStorageVolumes(t *testing.T) { } // Default spec - spec := &enterprisev1.CommonSplunkSpec{} + spec := &enterpriseApi.CommonSplunkSpec{} test := func(want string) { ss := statefulSet.DeepCopy() @@ -906,12 +1119,12 @@ func TestAddStorageVolumes(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"test-statefulset","namespace":"test","creationTimestamp":null},"spec":{"replicas":1,"selector":null,"template":{"metadata":{"creationTimestamp":null},"spec":{"containers":[{"name":"splunk","image":"test","resources":{},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"}]}]}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"","updateStrategy":{}},"status":{"replicas":0}}`) // Define PVCs for etc & var with storage capacity and storage class name defined - spec = &enterprisev1.CommonSplunkSpec{ - EtcVolumeStorageConfig: enterprisev1.StorageClassSpec{ + spec = &enterpriseApi.CommonSplunkSpec{ + EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{ StorageCapacity: "25Gi", StorageClassName: "gp2", }, - VarVolumeStorageConfig: enterprisev1.StorageClassSpec{ + VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{ StorageCapacity: "35Gi", StorageClassName: "gp3", }, @@ -919,23 +1132,23 @@ func TestAddStorageVolumes(t *testing.T) { 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"test-statefulset","namespace":"test","creationTimestamp":null},"spec":{"replicas":1,"selector":null,"template":{"metadata":{"creationTimestamp":null},"spec":{"containers":[{"name":"splunk","image":"test","resources":{},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"}]}]}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"25Gi"}},"storageClassName":"gp2"},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"35Gi"}},"storageClassName":"gp3"},"status":{}}],"serviceName":"","updateStrategy":{}},"status":{"replicas":0}}`) // Define PVCs for etc & ephemeral for var - spec = &enterprisev1.CommonSplunkSpec{ - EtcVolumeStorageConfig: enterprisev1.StorageClassSpec{ + spec = &enterpriseApi.CommonSplunkSpec{ + EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{ StorageCapacity: "25Gi", StorageClassName: "gp2", }, - VarVolumeStorageConfig: enterprisev1.StorageClassSpec{ + VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{ EphemeralStorage: true, }, } test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"test-statefulset","namespace":"test","creationTimestamp":null},"spec":{"replicas":1,"selector":null,"template":{"metadata":{"creationTimestamp":null},"spec":{"volumes":[{"name":"mnt-splunk-var","emptyDir":{}}],"containers":[{"name":"splunk","image":"test","resources":{},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"mnt-splunk-var","mountPath":"/opt/splunk/var"}]}]}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"25Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"","updateStrategy":{}},"status":{"replicas":0}}`) // Define ephemeral for etc & PVCs for var - spec = &enterprisev1.CommonSplunkSpec{ - EtcVolumeStorageConfig: enterprisev1.StorageClassSpec{ + spec = &enterpriseApi.CommonSplunkSpec{ + EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{ EphemeralStorage: true, }, - VarVolumeStorageConfig: enterprisev1.StorageClassSpec{ + VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{ StorageCapacity: "25Gi", StorageClassName: "gp2", }, @@ -943,11 +1156,11 @@ func TestAddStorageVolumes(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"test-statefulset","namespace":"test","creationTimestamp":null},"spec":{"replicas":1,"selector":null,"template":{"metadata":{"creationTimestamp":null},"spec":{"volumes":[{"name":"mnt-splunk-etc","emptyDir":{}}],"containers":[{"name":"splunk","image":"test","resources":{},"volumeMounts":[{"name":"mnt-splunk-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"}]}]}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"25Gi"}},"storageClassName":"gp2"},"status":{}}],"serviceName":"","updateStrategy":{}},"status":{"replicas":0}}`) // Define ephemeral for etc & var(should ignore storage capacity & storage class name) - spec = &enterprisev1.CommonSplunkSpec{ - EtcVolumeStorageConfig: enterprisev1.StorageClassSpec{ + spec = 
&enterpriseApi.CommonSplunkSpec{ + EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{ EphemeralStorage: true, }, - VarVolumeStorageConfig: enterprisev1.StorageClassSpec{ + VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{ EphemeralStorage: true, StorageCapacity: "25Gi", StorageClassName: "gp2", @@ -956,8 +1169,8 @@ func TestAddStorageVolumes(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"test-statefulset","namespace":"test","creationTimestamp":null},"spec":{"replicas":1,"selector":null,"template":{"metadata":{"creationTimestamp":null},"spec":{"volumes":[{"name":"mnt-splunk-etc","emptyDir":{}},{"name":"mnt-splunk-var","emptyDir":{}}],"containers":[{"name":"splunk","image":"test","resources":{},"volumeMounts":[{"name":"mnt-splunk-etc","mountPath":"/opt/splunk/etc"},{"name":"mnt-splunk-var","mountPath":"/opt/splunk/var"}]}]}},"serviceName":"","updateStrategy":{}},"status":{"replicas":0}}`) // Define invalid EtcVolumeStorageConfig - spec = &enterprisev1.CommonSplunkSpec{ - EtcVolumeStorageConfig: enterprisev1.StorageClassSpec{ + spec = &enterpriseApi.CommonSplunkSpec{ + EtcVolumeStorageConfig: enterpriseApi.StorageClassSpec{ StorageCapacity: "----", }, } @@ -967,8 +1180,8 @@ func TestAddStorageVolumes(t *testing.T) { } // Define invalid VarVolumeStorageConfig - spec = &enterprisev1.CommonSplunkSpec{ - VarVolumeStorageConfig: enterprisev1.StorageClassSpec{ + spec = &enterpriseApi.CommonSplunkSpec{ + VarVolumeStorageConfig: enterpriseApi.StorageClassSpec{ StorageCapacity: "----", }, } @@ -978,3 +1191,102 @@ func TestAddStorageVolumes(t *testing.T) { } } + +func TestGetVolumeSourceMountFromConfigMapData(t *testing.T) { + var configMapName = "testConfgMap" + var namespace = "testNameSpace" + + dataMap := make(map[string]string) + dataMap["a"] = "x" + dataMap["b"] = "y" + dataMap["z"] = "z" + cm := splctrl.PrepareConfigMap(configMapName, namespace, dataMap) + var mode int32 = 755 + + test := func(cm *corev1.ConfigMap, mode *int32, want string) { + f := func() (interface{}, error) { + return getVolumeSourceMountFromConfigMapData(cm, mode), nil + } + configTester(t, "getVolumeSourceMountFromConfigMapData()", f, want) + + } + + test(cm, &mode, `{"configMap":{"name":"testConfgMap","items":[{"key":"a","path":"a","mode":755},{"key":"b","path":"b","mode":755},{"key":"z","path":"z","mode":755}],"defaultMode":755}}`) +} + +func TestGetLivenessProbe(t *testing.T) { + cr := &enterpriseApi.ClusterMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM", + Namespace: "test", + }, + } + spec := &cr.Spec.CommonSplunkSpec + + // Test if default delay works always + livenessProbe := getLivenessProbe(cr, spec, 0) + if livenessProbe.InitialDelaySeconds != livenessProbeDefaultDelaySec { + t.Errorf("Failed to set Liveness probe default delay") + } + + // Test if the default delay can be overwritten with configured delay + spec.LivenessInitialDelaySeconds = livenessProbeDefaultDelaySec + 10 + livenessProbe = getLivenessProbe(cr, spec, 0) + if livenessProbe.InitialDelaySeconds != spec.LivenessInitialDelaySeconds { + t.Errorf("Failed to set Liveness probe initial delay with configured value") + } + + // Test if the additional Delay can override the default and the cofigured delay values + livenessProbe = getLivenessProbe(cr, spec, 20) + if livenessProbe.InitialDelaySeconds != livenessProbeDefaultDelaySec+20 { + t.Errorf("Failed to set additional delay overriding the default and configured") + } +} + +func TestGetReadinessProbe(t *testing.T) { + cr := 
&enterpriseApi.ClusterMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "CM", + Namespace: "test", + }, + } + spec := &cr.Spec.CommonSplunkSpec + + // Test if default delay works always + readinessProbe := getReadinessProbe(cr, spec, 0) + if readinessProbe.InitialDelaySeconds != readinessProbeDefaultDelaySec { + t.Errorf("Failed to set Readiness probe default delay") + } + + // Test if the default delay can be overwritten with configured delay + spec.ReadinessInitialDelaySeconds = readinessProbeDefaultDelaySec + 10 + readinessProbe = getReadinessProbe(cr, spec, 0) + if readinessProbe.InitialDelaySeconds != spec.ReadinessInitialDelaySeconds { + t.Errorf("Failed to set Readiness probe initial delay with configured value") + } + + // Test if the additional Delay can override the default and the cofigured delay values + readinessProbe = getReadinessProbe(cr, spec, 20) + if readinessProbe.InitialDelaySeconds != readinessProbeDefaultDelaySec+20 { + t.Errorf("Failed to set additional delay overriding the default and configured") + } +} + +func TestGetProbe(t *testing.T) { + + command := []string{ + "grep", + "ready", + "file.txt", + } + + test := func(command []string, delay, timeout, period int32, want string) { + f := func() (interface{}, error) { + return getProbe(command, delay, timeout, period), nil + } + configTester(t, "getProbe()", f, want) + + } + + test(command, 100, 10, 10, `{"exec":{"command":["grep","ready","file.txt"]},"initialDelaySeconds":100,"timeoutSeconds":10,"periodSeconds":10}`) +} diff --git a/pkg/splunk/enterprise/finalizers_test.go b/pkg/splunk/enterprise/finalizers_test.go index aa79d1a19..b2fdbbaeb 100644 --- a/pkg/splunk/enterprise/finalizers_test.go +++ b/pkg/splunk/enterprise/finalizers_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -76,7 +76,7 @@ func splunkDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(spl wantDeleted := false if cr.GetObjectMeta().GetDeletionTimestamp() != nil { wantDeleted = true - apiVersion, _ := schema.ParseGroupVersion(enterprisev1.APIVersion) + apiVersion, _ := schema.ParseGroupVersion(enterpriseApi.APIVersion) mockCalls["Update"] = []spltest.MockFuncCall{ {MetaName: fmt.Sprintf("*%s.%s-%s-%s", apiVersion.Version, cr.GetObjectKind().GroupVersionKind().Kind, cr.GetNamespace(), cr.GetName())}, } @@ -149,7 +149,7 @@ func splunkDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func(spl {MetaName: "*v1.StatefulSet-test-splunk-test-monitoring-console"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, - {MetaName: "*v1.ClusterMaster-test-master1"}, + {MetaName: "*v2.ClusterMaster-test-master1"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, } } @@ -212,7 +212,7 @@ func splunkPVCDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func( wantDeleted := false if cr.GetObjectMeta().GetDeletionTimestamp() != nil { wantDeleted = true - apiVersion, _ := schema.ParseGroupVersion(enterprisev1.APIVersion) + apiVersion, _ := schema.ParseGroupVersion(enterpriseApi.APIVersion) mockCalls["Update"] = []spltest.MockFuncCall{ {MetaName: fmt.Sprintf("*%s.%s-%s-%s", 
apiVersion.Version, cr.GetObjectKind().GroupVersionKind().Kind, cr.GetNamespace(), cr.GetName())}, } @@ -233,7 +233,7 @@ func splunkPVCDeletionTester(t *testing.T, cr splcommon.MetaObject, delete func( c.CheckCalls(t, "Testsplctrl.CheckForDeletion", mockCalls) } func TestDeleteSplunkPvc(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -260,7 +260,7 @@ func TestDeleteSplunkPvc(t *testing.T) { } func TestDeleteSplunkClusterMasterPvc(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 7dfcc3a1a..81502115b 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -36,7 +36,7 @@ import ( ) // ApplyIndexerCluster reconciles the state of a Splunk Enterprise indexer cluster. -func ApplyIndexerCluster(client splcommon.ControllerClient, cr *enterprisev1.IndexerCluster) (reconcile.Result, error) { +func ApplyIndexerCluster(client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -57,7 +57,7 @@ func ApplyIndexerCluster(client splcommon.ControllerClient, cr *enterprisev1.Ind cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { - cr.Status.Peers = []enterprisev1.IndexerClusterMemberStatus{} + cr.Status.Peers = []enterpriseApi.IndexerClusterMemberStatus{} } if cr.Status.IndexerSecretChanged == nil { cr.Status.IndexerSecretChanged = []bool{} @@ -82,7 +82,7 @@ func ApplyIndexerCluster(client splcommon.ControllerClient, cr *enterprisev1.Ind Namespace: cr.GetNamespace(), Name: cr.Spec.ClusterMasterRef.Name, } - masterIdxCluster := &enterprisev1.ClusterMaster{} + masterIdxCluster := &enterpriseApi.ClusterMaster{} err = client.Get(context.TODO(), namespacedName, masterIdxCluster) if err == nil { cr.Status.ClusterMasterPhase = masterIdxCluster.Status.Phase @@ -170,13 +170,13 @@ func ApplyIndexerCluster(client splcommon.ControllerClient, cr *enterprisev1.Ind type indexerClusterPodManager struct { c splcommon.ControllerClient log logr.Logger - cr *enterprisev1.IndexerCluster + cr *enterpriseApi.IndexerCluster secrets *corev1.Secret newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient } // SetClusterMaintenanceMode enables/disables cluster maintenance mode -func SetClusterMaintenanceMode(c splcommon.ControllerClient, cr *enterprisev1.IndexerCluster, enable bool, mock bool) error { +func SetClusterMaintenanceMode(c splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster, enable bool, mock bool) error { // Retrieve admin password from Pod var masterIdxcName string if len(cr.Spec.ClusterMasterRef.Name) > 0 { @@ -555,7 +555,7 @@ func (mgr *indexerClusterPodManager) 
updateStatus(statefulSet *appsv1.StatefulSe } for n := int32(0); n < statefulSet.Status.Replicas; n++ { peerName := GetSplunkStatefulsetPodName(SplunkIndexer, mgr.cr.GetName(), n) - peerStatus := enterprisev1.IndexerClusterMemberStatus{Name: peerName} + peerStatus := enterpriseApi.IndexerClusterMemberStatus{Name: peerName} peerInfo, ok := peers[peerName] if ok { peerStatus.ID = peerInfo.ID @@ -582,7 +582,7 @@ func (mgr *indexerClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSe } // getIndexerStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise indexers. -func getIndexerStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.IndexerCluster) (*appsv1.StatefulSet, error) { +func getIndexerStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.IndexerCluster) (*appsv1.StatefulSet, error) { // Note: SPLUNK_INDEXER_URL is not used by the indexer pod containers, // hence avoided the call to getIndexerExtraEnv. // If other indexer CR specific env variables are required: @@ -593,7 +593,7 @@ func getIndexerStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.I } // validateIndexerClusterSpec checks validity and makes default updates to a IndexerClusterSpec, and returns error if something is wrong. -func validateIndexerClusterSpec(cr *enterprisev1.IndexerCluster) error { +func validateIndexerClusterSpec(cr *enterpriseApi.IndexerCluster) error { // We cannot have 0 replicas in IndexerCluster spec, since this refers to number of indexers in an indexer cluster if cr.Spec.Replicas == 0 { cr.Spec.Replicas = 1 diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 96f72d4f0..3173738cb 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -37,14 +37,15 @@ func TestApplyIndexerCluster(t *testing.T) { funcCalls := []spltest.MockFuncCall{ {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, - {MetaName: "*v1.ClusterMaster-test-master1"}, + {MetaName: "*v2.ClusterMaster-test-master1"}, {MetaName: "*v1.Service-test-splunk-stack1-indexer-headless"}, {MetaName: "*v1.Service-test-splunk-stack1-indexer-service"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-indexer-secret-v1"}, - {MetaName: "*v1.ClusterMaster-test-master1"}, + {MetaName: "*v2.ClusterMaster-test-master1"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, } + labels := map[string]string{ "app.kubernetes.io/component": "versionedSecrets", "app.kubernetes.io/managed-by": "splunk-operator", @@ -58,7 +59,7 @@ func TestApplyIndexerCluster(t *testing.T) { createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[3], funcCalls[4], funcCalls[6]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} updateCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "List": {listmockCall[0]}} - current := enterprisev1.IndexerCluster{ + current := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: 
"IndexerCluster", }, @@ -66,9 +67,9 @@ func TestApplyIndexerCluster(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.IndexerClusterSpec{ + Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterMasterRef: corev1.ObjectReference{ Name: "master1", }, @@ -81,17 +82,17 @@ func TestApplyIndexerCluster(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyIndexerCluster(c, cr.(*enterprisev1.IndexerCluster)) + _, err := ApplyIndexerCluster(c, cr.(*enterpriseApi.IndexerCluster)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyIndexerCluster", ¤t, revised, createCalls, updateCalls, reconcile, true) - // test deletion + // // test deletion currentTime := metav1.NewTime(time.Now()) revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyIndexerCluster(c, cr.(*enterprisev1.IndexerCluster)) + _, err := ApplyIndexerCluster(c, cr.(*enterpriseApi.IndexerCluster)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -99,7 +100,7 @@ func TestApplyIndexerCluster(t *testing.T) { func TestGetClusterMasterClient(t *testing.T) { scopedLog := log.WithName("TestGetClusterMasterClient") - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -107,15 +108,15 @@ func TestGetClusterMasterClient(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.IndexerClusterSpec{ + Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterMasterRef: corev1.ObjectReference{ Name: "", /* Empty ClusterMasterRef */ }, }, }, - Status: enterprisev1.IndexerClusterStatus{ + Status: enterpriseApi.IndexerClusterStatus{ ClusterMasterPhase: splcommon.PhaseReady, }, } @@ -149,7 +150,7 @@ func TestGetClusterMasterClient(t *testing.T) { func getIndexerClusterPodManager(method string, mockHandlers []spltest.MockHTTPHandler, mockSplunkClient *spltest.MockHTTPClient, replicas int32) *indexerClusterPodManager { scopedLog := log.WithName(method) - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -157,15 +158,15 @@ func getIndexerClusterPodManager(method string, mockHandlers []spltest.MockHTTPH Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.IndexerClusterSpec{ + Spec: enterpriseApi.IndexerClusterSpec{ Replicas: replicas, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterMasterRef: corev1.ObjectReference{ Name: "master1", }, }, }, - Status: enterprisev1.IndexerClusterStatus{ + Status: enterpriseApi.IndexerClusterStatus{ ClusterMasterPhase: splcommon.PhaseReady, }, } @@ -674,7 +675,7 @@ func TestSetClusterMaintenanceMode(t *testing.T) { c.AddObjects(initObjectList) - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -830,7 +831,7 @@ func TestApplyIdxcSecret(t *testing.T) { }, } - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ 
Kind: "IndexerCluster", }, @@ -957,14 +958,14 @@ func TestApplyIdxcSecret(t *testing.T) { func TestInvalidIndexerClusterSpec(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, } - cm := enterprisev1.ClusterMaster{ + cm := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -998,7 +999,7 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { } func TestGetIndexerStatefulSet(t *testing.T) { - cr := enterprisev1.IndexerCluster{ + cr := enterpriseApi.IndexerCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index 4204b9de8..a3b6aba4e 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -16,19 +16,20 @@ package enterprise import ( "context" + "reflect" "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" ) // ApplyLicenseMaster reconciles the state for the Splunk Enterprise license master. -func ApplyLicenseMaster(client splcommon.ControllerClient, cr *enterprisev1.LicenseMaster) (reconcile.Result, error) { +func ApplyLicenseMaster(client splcommon.ControllerClient, cr *enterpriseApi.LicenseMaster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -36,12 +37,25 @@ func ApplyLicenseMaster(client splcommon.ControllerClient, cr *enterprisev1.Lice RequeueAfter: time.Second * 5, } + scopedLog := log.WithName("ApplyLicenseMaster").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + // validate and updates defaults for CR - err := validateLicenseMasterSpec(&cr.Spec) + err := validateLicenseMasterSpec(cr) if err != nil { + scopedLog.Error(err, "Failed to validate license master spec") return result, err } + // If the app framework is configured then do following things - + // 1. Initialize the S3Clients based on providers + // 2. Check the status of apps on remote storage. + if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { + err := initAndCheckAppInfoStatus(client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + if err != nil { + return result, err + } + } + // updates status after function completes cr.Status.Phase = splcommon.PhaseError defer func() { @@ -90,21 +104,47 @@ func ApplyLicenseMaster(client splcommon.ControllerClient, cr *enterprisev1.Lice // no need to requeue if everything is ready if cr.Status.Phase == splcommon.PhaseReady { + if cr.Status.AppContext.AppsSrcDeployStatus != nil { + markAppsStatusToComplete(cr.Status.AppContext.AppsSrcDeployStatus) + } + err = ApplyMonitoringConsole(client, cr, cr.Spec.CommonSplunkSpec, getLicenseMasterURL(cr, &cr.Spec.CommonSplunkSpec)) if err != nil { return result, err } - result.Requeue = false + + // Requeue the reconcile after polling interval if we had set the lastAppInfoCheckTime. 
+ if cr.Status.AppContext.LastAppInfoCheckTime != 0 { + result.RequeueAfter = GetNextRequeueTime(cr.Status.AppContext.AppsRepoStatusPollInterval, cr.Status.AppContext.LastAppInfoCheckTime) + } else { + result.Requeue = false + } } return result, nil } // getLicenseMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license master. -func getLicenseMasterStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.LicenseMaster) (*appsv1.StatefulSet, error) { - return getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkLicenseMaster, 1, []corev1.EnvVar{}) +func getLicenseMasterStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.LicenseMaster) (*appsv1.StatefulSet, error) { + ss, err := getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkLicenseMaster, 1, []corev1.EnvVar{}) + if err != nil { + return ss, err + } + + // Setup App framework init containers + setupAppInitContainers(client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) + + return ss, err } // validateLicenseMasterSpec checks validity and makes default updates to a LicenseMasterSpec, and returns error if something is wrong. -func validateLicenseMasterSpec(spec *enterprisev1.LicenseMasterSpec) error { - return validateCommonSplunkSpec(&spec.CommonSplunkSpec) +func validateLicenseMasterSpec(cr *enterpriseApi.LicenseMaster) error { + + if !reflect.DeepEqual(cr.Status.AppContext.AppFrameworkConfig, cr.Spec.AppFrameworkConfig) { + err := ValidateAppFrameworkSpec(&cr.Spec.AppFrameworkConfig, &cr.Status.AppContext, true) + if err != nil { + return err + } + } + + return validateCommonSplunkSpec(&cr.Spec.CommonSplunkSpec) } diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go index 142a95739..71e438b48 100644 --- a/pkg/splunk/enterprise/licensemaster_test.go +++ b/pkg/splunk/enterprise/licensemaster_test.go @@ -22,7 +22,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" @@ -35,6 +36,7 @@ func TestApplyLicenseMaster(t *testing.T) { {MetaName: "*v1.Service-test-splunk-stack1-license-master-service"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-license-master-secret-v1"}, + {MetaName: "*v1.ConfigMap-test-splunk-stack1-licensemaster-app-list"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-license-master"}, } labels := map[string]string{ @@ -47,9 +49,9 @@ func TestApplyLicenseMaster(t *testing.T) { } listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[4], funcCalls[5]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[5]}, "Update": {funcCalls[5]}, "List": {listmockCall[0]}} - current := enterprisev1.LicenseMaster{ + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[4], funcCalls[6]}, 
"Update": {funcCalls[0]}, "List": {listmockCall[0]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Update": {funcCalls[6]}, "List": {listmockCall[0]}} + current := enterpriseApi.LicenseMaster{ TypeMeta: metav1.TypeMeta{ Kind: "LicenseMaster", }, @@ -61,7 +63,7 @@ func TestApplyLicenseMaster(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyLicenseMaster(c, cr.(*enterprisev1.LicenseMaster)) + _, err := ApplyLicenseMaster(c, cr.(*enterpriseApi.LicenseMaster)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyLicenseMaster", ¤t, revised, createCalls, updateCalls, reconcile, true) @@ -71,14 +73,14 @@ func TestApplyLicenseMaster(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = ¤tTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyLicenseMaster(c, cr.(*enterprisev1.LicenseMaster)) + _, err := ApplyLicenseMaster(c, cr.(*enterpriseApi.LicenseMaster)) return true, err } splunkDeletionTester(t, revised, deleteFunc) } func TestGetLicenseMasterStatefulSet(t *testing.T) { - cr := enterprisev1.LicenseMaster{ + cr := enterpriseApi.LicenseMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -93,7 +95,7 @@ func TestGetLicenseMasterStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := validateLicenseMasterSpec(&cr.Spec); err != nil { + if err := validateLicenseMasterSpec(&cr); err != nil { t.Errorf("validateLicenseMasterSpec() returned error: %v", err) } return getLicenseMasterStatefulSet(c, &cr) @@ -131,3 +133,384 @@ func TestGetLicenseMasterStatefulSet(t *testing.T) { 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"},{"name":"TEST_ENV_VAR","value":"test_value"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"l
icense-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } + +func TestAppFrameworkApplyLicenseMasterShouldNotFail(t *testing.T) { + cr := enterpriseApi.LicenseMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret", Type: "s3", Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + _, err = ApplyLicenseMaster(client, &cr) + if err != nil { + t.Errorf("ApplyLicenseMaster should be successful") + } +} + +func TestLicenseMasterGetAppsListForAWSS3ClientShouldNotFail(t *testing.T) { + cr := enterpriseApi.LicenseMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + Defaults: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol2", + Scope: "local", + }, + VolList: []enterpriseApi.VolumeSpec{ + { + Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + { + Name: "msos_s2s3_vol2", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london2", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + { + Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local", + }, + }, + { + Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local", + }, + }, + { + Name: "authenticationApps", + Location: "authenticationAppsRepo", + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1", "5055a61b3d1b667a4c3279a381a2e7ae", 
"19779168370b97d8654424e6c9446dd9"} + Keys := []string{"admin_app.tgz", "security_app.tgz", "authentication_app.tgz"} + Sizes := []int64{10, 20, 30} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[1], + Key: &Keys[1], + LastModified: &randomTime, + Size: &Sizes[1], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[2], + Key: &Keys[2], + LastModified: &randomTime, + Size: &Sizes[2], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cr.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + var allSuccess bool = true + for index, appSource := range appFrameworkRef.AppSources { + + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + allSuccess = false + continue + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{client: client, + cr: &cr, appFrameworkRef: &cr.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + cl.Objects = mockAwsObjects[index].Objects + return cl + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + s3Response, err := s3ClientMgr.GetAppsList() + if err != nil { + allSuccess = false + continue + } + + var mockResponse spltest.MockAWSS3Client + mockResponse, err = splclient.ConvertS3Response(s3Response) + if err != nil { + allSuccess = false + continue + } + + if mockAwsHandler.GotSourceAppListResponseMap == nil { + mockAwsHandler.GotSourceAppListResponseMap = make(map[string]spltest.MockAWSS3Client) + } + + mockAwsHandler.GotSourceAppListResponseMap[appSource.Name] = mockResponse + } + + if allSuccess == false { + t.Errorf("Unable to get apps list for all the app sources") + } + method := "GetAppsList" + mockAwsHandler.CheckAWSS3Response(t, method) +} + +func TestLicenseMasterGetAppsListForAWSS3ClientShouldFail(t *testing.T) { + lm := enterpriseApi.LicenseMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.LicenseMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := 
spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1"} + Keys := []string{"admin_app.tgz"} + Sizes := []int64{10} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := lm.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + + appSource := appFrameworkRef.AppSources[0] + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + t.Errorf("Unable to get Volume due to error=%s", err) + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{ + client: client, + cr: &lm, + appFrameworkRef: &lm.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + // Purposefully return nil here so that we test the error scenario + return nil + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, + appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, + location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + // Get the mock client + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as there is no S3 secret provided") + } + + // Create empty S3 secret + s3Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3-secret", + Namespace: "test", + }, + Data: map[string][]byte{}, + } + + client.AddObject(&s3Secret) + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty keys") + } + + s3AccessKey := []byte{'1'} + s3Secret.Data = map[string][]byte{"s3_access_key": s3AccessKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_secret_key") + } + + s3SecretKey := []byte{'2'} + s3Secret.Data = map[string][]byte{"s3_secret_key": s3SecretKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_access_key") + } + + // Create S3 secret + s3Secret = spltest.GetMockS3SecretKeys("s3-secret") + + // This should return an error as we have initialized initFn for s3ClientMgr + // to return a nil client. 
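+ // (Note: client.AddObject above stored &s3Secret, so reassigning the local s3Secret value also updates the secret object the mock client holds, assuming the mock keeps the pointer rather than a deep copy.)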
+ _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as we could not get the S3 client") + } + + s3ClientMgr.initFn = func(region, accessKeyID, secretAccessKey string) interface{} { + // To test the error scenario, do not set the Objects member yet + cl := spltest.MockAWSS3Client{} + return cl + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as we have empty objects in MockAWSS3Client") + } +} diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index d65094540..d98dabb3d 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -21,7 +21,7 @@ import ( "sort" "strings" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" @@ -34,7 +34,7 @@ import ( ) // ApplyMonitoringConsole creates the statefulset for the monitoring console of Splunk Enterprise. -func ApplyMonitoringConsole(client splcommon.ControllerClient, cr splcommon.MetaObject, spec enterprisev1.CommonSplunkSpec, extraEnv []corev1.EnvVar) error { +func ApplyMonitoringConsole(client splcommon.ControllerClient, cr splcommon.MetaObject, spec enterpriseApi.CommonSplunkSpec, extraEnv []corev1.EnvVar) error { secrets, err := splutil.GetLatestVersionedSecret(client, cr, cr.GetNamespace(), GetSplunkStatefulsetName(SplunkMonitoringConsole, cr.GetNamespace())) if err != nil { return err @@ -124,13 +124,13 @@ func (mgr *monitoringConsolePodManager) getClusterMasterClient(cr splcommon.Meta // monitoringConsolePodManager is used to manage the monitoring console pod type monitoringConsolePodManager struct { cr *splcommon.MetaObject - spec *enterprisev1.CommonSplunkSpec + spec *enterpriseApi.CommonSplunkSpec secrets *corev1.Secret newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient } // getMonitoringConsoleStatefulSet returns a Kubernetes Statefulset object for Splunk Enterprise monitoring console instance.
-func getMonitoringConsoleStatefulSet(client splcommon.ControllerClient, cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec, instanceType InstanceType, secretName string) (*appsv1.StatefulSet, error) { +func getMonitoringConsoleStatefulSet(client splcommon.ControllerClient, cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec, instanceType InstanceType, secretName string) (*appsv1.StatefulSet, error) { var partOfIdentifier string var monitoringConsoleConfigMap *corev1.ConfigMap // there will always be 1 replica of the monitoring console diff --git a/pkg/splunk/enterprise/monitoringconsole_test.go b/pkg/splunk/enterprise/monitoringconsole_test.go index 34971d53c..de83c1142 100644 --- a/pkg/splunk/enterprise/monitoringconsole_test.go +++ b/pkg/splunk/enterprise/monitoringconsole_test.go @@ -23,14 +23,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" ) func TestApplyMonitoringConsole(t *testing.T) { - standaloneCR := enterprisev1.Standalone{ + standaloneCR := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -88,7 +88,7 @@ func TestApplyMonitoringConsole(t *testing.T) { } reconcile := func(c *spltest.MockClient, cr interface{}) error { - obj := cr.(*enterprisev1.Standalone) + obj := cr.(*enterpriseApi.Standalone) err := ApplyMonitoringConsole(c, obj, obj.Spec.CommonSplunkSpec, env) return err } @@ -208,14 +208,14 @@ func TestApplyMonitoringConsoleEnvConfigMap(t *testing.T) { } func TestGetMonitoringConsoleStatefulSet(t *testing.T) { - cr := enterprisev1.SearchHeadCluster{ + cr := enterpriseApi.SearchHeadCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.SearchHeadClusterSpec{ + Spec: enterpriseApi.SearchHeadClusterSpec{ Replicas: 3, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterMasterRef: corev1.ObjectReference{ Name: "stack1", }, @@ -240,7 +240,7 @@ func TestGetMonitoringConsoleStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := validateSearchHeadClusterSpec(&cr.Spec); err != nil { + if err := validateSearchHeadClusterSpec(&cr); err != nil { t.Errorf("validateSearchHeadClusterSpec() returned error: %v", err) } return getMonitoringConsoleStatefulSet(c, &cr, &cr.Spec.CommonSplunkSpec, SplunkMonitoringConsole, "splunk-test-secret") diff --git a/pkg/splunk/enterprise/names.go b/pkg/splunk/enterprise/names.go index fb4ce6903..19cd86482 100644 --- a/pkg/splunk/enterprise/names.go +++ b/pkg/splunk/enterprise/names.go @@ -41,6 +41,12 @@ const ( // identifier smartstoreTemplateStr = "splunk-%s-%s-smartstore" + // identifier + appListingTemplateStr = "splunk-%s-%s-app-list" + + // init container name + initContainerTemplate = "%s-init-%d-%s" + + // default docker image used for Splunk instances defaultSplunkImage = "splunk/splunk" @@ -56,6 +62,12 @@ const ( // identifier to track the smartstore config rev.
on Pod smartStoreConfigRev = "SmartStoreConfigRev" + // ToDo: Used only for Phase-2, to be removed later + appListingRev = "appListingRev" + + // Pod location for app related config + appConfLocationOnPod = "/mnt/app-listing/" + // command merger commandMerger = " && " @@ -65,9 +77,12 @@ const ( // command for init container on a CM commandForCMSmartstore = "mkdir -p /opt/splk/etc/master-apps/splunk-operator/local && ln -sfn /mnt/splunk-operator/local/indexes.conf /opt/splk/etc/master-apps/splunk-operator/local/indexes.conf && ln -sfn /mnt/splunk-operator/local/server.conf /opt/splk/etc/master-apps/splunk-operator/local/server.conf" - //smartstoreconfigToken used to track if the config is reflecting on Pod or not + // configToken used to track if the config is reflecting on Pod or not configToken = "conftoken" + // appsUpdateToken used to track if the latest app list is reflecting on pod or not + appsUpdateToken = "appsUpdateToken" + // port names and templates and protocols portNameTemplateStr = "%s-%s" @@ -79,6 +94,25 @@ const ( protoHTTP = "http" protoHTTPS = "https" protoTCP = "tcp" + + // Volume name for shared volume between init and splunk containers + appVolumeMntName = "init-apps" + + // Mount location for the shared app package volume + appBktMnt = "/init-apps/" + + // Average amount of time an app installation takes + avgAppInstallationTime = 5 + + // Readiness probe time values + readinessProbeDefaultDelaySec = 10 + readinessProbeTimeoutSec = 5 + readinessProbePeriodSec = 5 + + // Liveness probe time values + livenessProbeDefaultDelaySec = 300 + livenessProbeTimeoutSec = 30 + livenessProbePeriodSec = 30 ) // GetSplunkDeploymentName uses a template to name a Kubernetes Deployment for Splunk instances. @@ -124,6 +158,11 @@ func GetSplunkSmartstoreConfigMapName(identifier string, crKind string) string { return fmt.Sprintf(smartstoreTemplateStr, identifier, strings.ToLower(crKind)) } +// GetSplunkAppsConfigMapName uses a template to name a Kubernetes ConfigMap for a SplunkEnterprise resource. +func GetSplunkAppsConfigMapName(identifier string, crKind string) string { + return fmt.Sprintf(appListingTemplateStr, identifier, strings.ToLower(crKind)) +} + // GetSplunkStatefulsetUrls returns a list of fully qualified domain names for all pods within a Splunk StatefulSet. func GetSplunkStatefulsetUrls(namespace string, instanceType InstanceType, identifier string, replicas int32, hostnameOnly bool) string { urls := make([]string, replicas)
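Reviewer note: the probe timing constants added to names.go above are consumed by the pod-template builder, which is not part of this hunk. A minimal sketch of how they plausibly map onto `corev1.Probe` values; the probe commands are taken from the expected-output JSON in the deployer test further down, and `buildSplunkProbesSketch` is a hypothetical name, not the operator's actual helper:

```go
package enterprise

import corev1 "k8s.io/api/core/v1"

// buildSplunkProbesSketch shows how the probe constants above could be wired
// into liveness/readiness probes (assumption; the real builder lives in the
// statefulset construction code, not shown in this diff).
func buildSplunkProbesSketch() (liveness, readiness *corev1.Probe) {
	liveness = &corev1.Probe{
		Handler: corev1.Handler{
			Exec: &corev1.ExecAction{Command: []string{"/sbin/checkstate.sh"}},
		},
		InitialDelaySeconds: livenessProbeDefaultDelaySec, // 300s: Splunk startup is slow
		TimeoutSeconds:      livenessProbeTimeoutSec,
		PeriodSeconds:       livenessProbePeriodSec,
	}
	readiness = &corev1.Probe{
		Handler: corev1.Handler{
			Exec: &corev1.ExecAction{
				Command: []string{"/bin/grep", "started", "/opt/container_artifact/splunk-container.state"},
			},
		},
		InitialDelaySeconds: readinessProbeDefaultDelaySec,
		TimeoutSeconds:      readinessProbeTimeoutSec,
		PeriodSeconds:       readinessProbePeriodSec,
	}
	return liveness, readiness
}
```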
diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go index c5995e90f..b63afd9ca 100644 --- a/pkg/splunk/enterprise/searchheadcluster.go +++ b/pkg/splunk/enterprise/searchheadcluster.go @@ -17,6 +17,7 @@ package enterprise import ( "context" "fmt" + "reflect" "time" "github.com/go-logr/logr" @@ -24,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" ) // ApplySearchHeadCluster reconciles the state for a Splunk Enterprise search head cluster. -func ApplySearchHeadCluster(client splcommon.ControllerClient, cr *enterprisev1.SearchHeadCluster) (reconcile.Result, error) { +func ApplySearchHeadCluster(client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ Requeue: true, @@ -41,18 +42,28 @@ func ApplySearchHeadCluster(client splcommon.ControllerClient, cr *enterprisev1. scopedLog := log.WithName("ApplySearchHeadCluster").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) // validate and update defaults for the CR - err := validateSearchHeadClusterSpec(&cr.Spec) + err := validateSearchHeadClusterSpec(cr) if err != nil { return result, err } + // If the app framework is configured then do the following things - + // 1. Initialize the S3Clients based on providers + // 2. Check the status of apps on remote storage. + if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { + err := initAndCheckAppInfoStatus(client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + if err != nil { + return result, err + } + } + // updates status after function completes cr.Status.Phase = splcommon.PhaseError cr.Status.DeployerPhase = splcommon.PhaseError cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-search-head", cr.GetName()) if cr.Status.Members == nil { - cr.Status.Members = []enterprisev1.SearchHeadClusterMemberStatus{} + cr.Status.Members = []enterpriseApi.SearchHeadClusterMemberStatus{} } if cr.Status.ShcSecretChanged == nil { cr.Status.ShcSecretChanged = []bool{} @@ -138,11 +149,21 @@ func ApplySearchHeadCluster(client splcommon.ControllerClient, cr *enterprisev1. // no need to requeue if everything is ready if cr.Status.Phase == splcommon.PhaseReady { + if cr.Status.AppContext.AppsSrcDeployStatus != nil { + markAppsStatusToComplete(cr.Status.AppContext.AppsSrcDeployStatus) + } + err = ApplyMonitoringConsole(client, cr, cr.Spec.CommonSplunkSpec, getSearchHeadEnv(cr)) if err != nil { return result, err } - result.Requeue = false + + // Requeue the reconcile after polling interval if we had set the lastAppInfoCheckTime. + if cr.Status.AppContext.LastAppInfoCheckTime != 0 { + result.RequeueAfter = GetNextRequeueTime(cr.Status.AppContext.AppsRepoStatusPollInterval, cr.Status.AppContext.LastAppInfoCheckTime) + } else { + result.Requeue = false + } // Reset secrets related status structs cr.Status.ShcSecretChanged = []bool{}
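Reviewer note: `GetNextRequeueTime`, used in the requeue branch above, is defined elsewhere in this change. Judging by the call site it turns the poll interval and the last app-info check timestamp into a `time.Duration` for `result.RequeueAfter`; a minimal sketch under that assumption (argument units and the overdue clamp are guesses, and the name is suffixed `Sketch` to mark it as such):

```go
package enterprise

import "time"

// getNextRequeueTimeSketch schedules the next reconcile one poll interval
// after the last app-info check. Both arguments are assumed to be in seconds
// (lastCheckTime as a Unix timestamp), as the int64 status fields suggest.
func getNextRequeueTimeSketch(appsRepoPollIntervalSeconds, lastCheckTime int64) time.Duration {
	next := time.Unix(lastCheckTime+appsRepoPollIntervalSeconds, 0)
	requeueAfter := time.Until(next)
	if requeueAfter <= 0 {
		// Already overdue: reconcile again almost immediately.
		requeueAfter = time.Second
	}
	return requeueAfter
}
```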
@@ -157,7 +178,7 @@ func ApplySearchHeadCluster(client splcommon.ControllerClient, cr *enterprisev1. type searchHeadClusterPodManager struct { c splcommon.ControllerClient log logr.Logger - cr *enterprisev1.SearchHeadCluster + cr *enterpriseApi.SearchHeadCluster secrets *corev1.Secret newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient } @@ -451,7 +472,7 @@ func (mgr *searchHeadClusterPodManager) updateStatus(statefulSet *appsv1.Statefu for n := int32(0); n < statefulSet.Status.Replicas; n++ { c := mgr.getClient(n) memberName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), n) - memberStatus := enterprisev1.SearchHeadClusterMemberStatus{Name: memberName} + memberStatus := enterpriseApi.SearchHeadClusterMemberStatus{Name: memberName} memberInfo, err := c.GetSearchHeadClusterMemberInfo() if err == nil { memberStatus.Status = memberInfo.Status @@ -494,7 +515,7 @@ func (mgr *searchHeadClusterPodManager) updateStatus(statefulSet *appsv1.Statefu } // getSearchHeadStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise search heads. -func getSearchHeadStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.SearchHeadCluster) (*appsv1.StatefulSet, error) { +func getSearchHeadStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (*appsv1.StatefulSet, error) { // get search head env variables with deployer env := getSearchHeadEnv(cr) @@ -509,14 +530,30 @@ func getSearchHeadStatefulSet(client splcommon.ControllerClient, cr *enterprisev } // getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise deployer. -func getDeployerStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.SearchHeadCluster) (*appsv1.StatefulSet, error) { - return getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas)) +func getDeployerStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (*appsv1.StatefulSet, error) { + ss, err := getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas)) + if err != nil { + return ss, err + } + + // Setup App framework init containers + setupAppInitContainers(client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) + + return ss, err }
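Reviewer note: `setupAppInitContainers`, called from `getDeployerStatefulSet` above and from the standalone path later in this diff, is defined in a part of the change not shown here. In outline it mounts a shared volume and adds one init container per app source to stage app packages for the main splunk container. The sketch below reuses the `appVolumeMntName`, `appBktMnt`, and `initContainerTemplate` constants from names.go; everything else (the name suffixed `Sketch`, the elided download command) is an assumption:

```go
package enterprise

import (
	"fmt"
	"strings"

	corev1 "k8s.io/api/core/v1"

	enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2"
)

// setupAppInitContainersSketch adds a shared scratch volume plus one init
// container per app source that stages app packages into that volume.
func setupAppInitContainersSketch(podTemplate *corev1.PodTemplateSpec, image string, appFrameworkConfig *enterpriseApi.AppFrameworkSpec) {
	if len(appFrameworkConfig.AppSources) == 0 {
		return
	}

	// Shared volume, mounted at appBktMnt ("/init-apps/") by init and splunk containers.
	podTemplate.Spec.Volumes = append(podTemplate.Spec.Volumes, corev1.Volume{
		Name:         appVolumeMntName, // "init-apps"
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	})

	for i, appSrc := range appFrameworkConfig.AppSources {
		podTemplate.Spec.InitContainers = append(podTemplate.Spec.InitContainers, corev1.Container{
			// initContainerTemplate = "%s-init-%d-%s", e.g. "splunk-init-0-adminapps"
			Name:  fmt.Sprintf(initContainerTemplate, "splunk", i, strings.ToLower(appSrc.Name)),
			Image: image,
			VolumeMounts: []corev1.VolumeMount{
				{Name: appVolumeMntName, MountPath: appBktMnt},
			},
			// The real implementation downloads this app source's packages
			// from remote storage into the shared volume; elided here.
		})
	}
}
```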
// validateSearchHeadClusterSpec checks validity and makes default updates to a SearchHeadClusterSpec, and returns error if something is wrong. -func validateSearchHeadClusterSpec(spec *enterprisev1.SearchHeadClusterSpec) error { - if spec.Replicas < 3 { - spec.Replicas = 3 +func validateSearchHeadClusterSpec(cr *enterpriseApi.SearchHeadCluster) error { + if cr.Spec.Replicas < 3 { + cr.Spec.Replicas = 3 } - return validateCommonSplunkSpec(&spec.CommonSplunkSpec) + + if !reflect.DeepEqual(cr.Status.AppContext.AppFrameworkConfig, cr.Spec.AppFrameworkConfig) { + err := ValidateAppFrameworkSpec(&cr.Spec.AppFrameworkConfig, &cr.Status.AppContext, false) + if err != nil { + return err + } + } + + return validateCommonSplunkSpec(&cr.Spec.CommonSplunkSpec) } diff --git a/pkg/splunk/enterprise/searchheadcluster_test.go b/pkg/splunk/enterprise/searchheadcluster_test.go index 88c265e61..0bdcba491 100644 --- a/pkg/splunk/enterprise/searchheadcluster_test.go +++ b/pkg/splunk/enterprise/searchheadcluster_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -42,6 +42,7 @@ func TestApplySearchHeadCluster(t *testing.T) { {MetaName: "*v1.Service-test-splunk-stack1-deployer-service"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-deployer-secret-v1"}, + {MetaName: "*v1.ConfigMap-test-splunk-stack1-searchheadcluster-app-list"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-deployer"}, {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-search-head-secret-v1"}, @@ -59,9 +60,9 @@ func TestApplySearchHeadCluster(t *testing.T) { listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[6], funcCalls[7], funcCalls[9], funcCalls[10]}, "Update": {funcCalls[0]}, "List": {listmockCall[0], listmockCall[0]}} - updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[11]}, "Update": {funcCalls[7], funcCalls[10]}, "List": {listmockCall[0], listmockCall[0]}} - statefulSet := enterprisev1.SearchHeadCluster{ + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[6], funcCalls[8], funcCalls[10], funcCalls[11]}, "Update": {funcCalls[0]}, "List": {listmockCall[0], listmockCall[0]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Update": {funcCalls[8], funcCalls[11]}, "List": {listmockCall[0], listmockCall[0]}} + statefulSet := enterpriseApi.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ Kind: "SearchHeadCluster", }, @@ -79,7 +80,7 @@ func TestApplySearchHeadCluster(t *testing.T) { revised := statefulSet.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplySearchHeadCluster(c, cr.(*enterprisev1.SearchHeadCluster)) + _, err := ApplySearchHeadCluster(c, cr.(*enterpriseApi.SearchHeadCluster)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplySearchHeadCluster", &statefulSet,
revised, createCalls, updateCalls, reconcile, true) @@ -89,7 +90,7 @@ func TestApplySearchHeadCluster(t *testing.T) { revised.ObjectMeta.DeletionTimestamp = &currentTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplySearchHeadCluster(c, cr.(*enterprisev1.SearchHeadCluster)) + _, err := ApplySearchHeadCluster(c, cr.(*enterpriseApi.SearchHeadCluster)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -101,7 +102,7 @@ func searchHeadClusterPodManagerTester(t *testing.T, method string, mockHandlers // test for updating scopedLog := log.WithName(method) - cr := enterprisev1.SearchHeadCluster{ + cr := enterpriseApi.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ Kind: "SearchHeadCluster", }, @@ -388,7 +389,7 @@ func TestApplyShcSecret(t *testing.T) { }, } - cr := enterprisev1.SearchHeadCluster{ + cr := enterpriseApi.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ Kind: "SearchHeadCluster", }, @@ -517,7 +518,7 @@ func TestApplyShcSecret(t *testing.T) { } func TestGetSearchHeadStatefulSet(t *testing.T) { - cr := enterprisev1.SearchHeadCluster{ + cr := enterpriseApi.SearchHeadCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -532,7 +533,7 @@ func TestGetSearchHeadStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := validateSearchHeadClusterSpec(&cr.Spec); err != nil { + if err := validateSearchHeadClusterSpec(&cr); err != nil { t.Errorf("validateSearchHeadClusterSpec() returned error: %v", err) } return getSearchHeadStatefulSet(c, &cr) @@ -583,7 +584,7 @@ func TestGetSearchHeadStatefulSet(t *testing.T) { } func TestGetDeployerStatefulSet(t *testing.T) { - cr := enterprisev1.SearchHeadCluster{ + cr := enterpriseApi.SearchHeadCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -598,7 +599,7 @@ func TestGetDeployerStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := validateSearchHeadClusterSpec(&cr.Spec); err != nil { + if err := validateSearchHeadClusterSpec(&cr); err != nil { t.Errorf("validateSearchHeadClusterSpec() returned error: %v", err) } return getDeployerStatefulSet(c, &cr) @@ -624,3 +625,385 @@ func TestGetDeployerStatefulSet(t *testing.T) { cr.Spec.ServiceAccount = "defaults"
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-deployer","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-deployer-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_deployer"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_SEARCH_HEAD_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-1.splunk-stack1-search-head-headless.test.svc.cluster.local,splunk-stack1-search-head-2.splunk-stack1-search-head-headless.test.svc.cluster.local"},{"name":"SPLUNK_SEARCH_HEAD_CAPTAIN_URL","value":"splunk-stack1-search-head-0.splunk-stack1-search-head-headless.test.svc.cluster.local"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-deployer"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"meta
data":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"search-head","app.kubernetes.io/instance":"splunk-stack1-deployer","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"deployer","app.kubernetes.io/part-of":"splunk-stack1-search-head"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-deployer-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) } + +func TestAppFrameworkSearchHeadClusterShouldNotFail(t *testing.T) { + cr := enterpriseApi.SearchHeadCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.SearchHeadClusterSpec{ + Replicas: 3, + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret", Type: "s3", Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + _, err = ApplySearchHeadCluster(client, &cr) + if err != nil { + t.Errorf("ApplySearchHeadCluster should be successful") + } +} + +func TestSHCGetAppsListForAWSS3ClientShouldNotFail(t *testing.T) { + cr := enterpriseApi.SearchHeadCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.SearchHeadClusterSpec{ + Replicas: 3, + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + Defaults: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol2", + Scope: "local", + }, + VolList: []enterpriseApi.VolumeSpec{ + { + Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + { + Name: "msos_s2s3_vol2", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london2", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + { + Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local", + }, + }, + { + Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local", + }, + }, + { + Name: "authenticationApps", + Location: "authenticationAppsRepo", + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + // Create namespace scoped secret 
+ _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1", "5055a61b3d1b667a4c3279a381a2e7ae", "19779168370b97d8654424e6c9446dd9"} + Keys := []string{"admin_app.tgz", "security_app.tgz", "authentication_app.tgz"} + Sizes := []int64{10, 20, 30} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[1], + Key: &Keys[1], + LastModified: &randomTime, + Size: &Sizes[1], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[2], + Key: &Keys[2], + LastModified: &randomTime, + Size: &Sizes[2], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cr.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + var allSuccess bool = true + for index, appSource := range appFrameworkRef.AppSources { + + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + allSuccess = false + continue + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{client: client, + cr: &cr, appFrameworkRef: &cr.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + cl.Objects = mockAwsObjects[index].Objects + return cl + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + s3Response, err := s3ClientMgr.GetAppsList() + if err != nil { + allSuccess = false + continue + } + + var mockResponse spltest.MockAWSS3Client + mockResponse, err = splclient.ConvertS3Response(s3Response) + if err != nil { + allSuccess = false + continue + } + if mockAwsHandler.GotSourceAppListResponseMap == nil { + mockAwsHandler.GotSourceAppListResponseMap = make(map[string]spltest.MockAWSS3Client) + } + + mockAwsHandler.GotSourceAppListResponseMap[appSource.Name] = mockResponse + } + + if allSuccess == false { + t.Errorf("Unable to get apps list for all the app sources") + } + method := "GetAppsList" + mockAwsHandler.CheckAWSS3Response(t, method) +} + +func TestSHCGetAppsListForAWSS3ClientShouldFail(t *testing.T) { + cr := enterpriseApi.SearchHeadCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.SearchHeadClusterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + 
AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1"} + Keys := []string{"admin_app.tgz"} + Sizes := []int64{10} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cr.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + + appSource := appFrameworkRef.AppSources[0] + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + t.Errorf("Unable to get Volume due to error=%s", err) + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{ + client: client, + cr: &cr, + appFrameworkRef: &cr.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + // Purposefully return nil here so that we test the error scenario + return nil + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, + appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, + location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + // Get the mock client + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as there is no S3 secret provided") + } + + // Create empty S3 secret + s3Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3-secret", + Namespace: "test", + }, + Data: map[string][]byte{}, + } + + client.AddObject(&s3Secret) + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty keys") + } + + s3AccessKey := []byte{'1'} + s3Secret.Data = map[string][]byte{"s3_access_key": s3AccessKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_secret_key") + } + + s3SecretKey := []byte{'2'} + s3Secret.Data = map[string][]byte{"s3_secret_key": s3SecretKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_access_key") + } + + // Create S3 secret + s3Secret = spltest.GetMockS3SecretKeys("s3-secret") + + // This should return an error as we have initialized initFn for s3ClientMgr + // to return a nil client. 
+ _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as we could not get the S3 client") + } + + s3ClientMgr.initFn = func(region, accessKeyID, secretAccessKey string) interface{} { + // To test the error scenario, do not set the Objects member yet + cl := spltest.MockAWSS3Client{} + return cl + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as we have empty objects in MockAWSS3Client") + } +} diff --git a/pkg/splunk/enterprise/standalone.go b/pkg/splunk/enterprise/standalone.go index e605f1df9..5c3716497 100644 --- a/pkg/splunk/enterprise/standalone.go +++ b/pkg/splunk/enterprise/standalone.go @@ -24,13 +24,13 @@ import ( corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/reconcile" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" ) // ApplyStandalone reconciles the StatefulSet for N standalone instances of Splunk Enterprise. -func ApplyStandalone(client splcommon.ControllerClient, cr *enterprisev1.Standalone) (reconcile.Result, error) { +func ApplyStandalone(client splcommon.ControllerClient, cr *enterpriseApi.Standalone) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds result := reconcile.Result{ @@ -38,13 +38,15 @@ func ApplyStandalone(client splcommon.ControllerClient, cr *enterprisev1.Standal RequeueAfter: time.Second * 5, } + scopedLog := log.WithName("ApplyStandalone").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) if cr.Status.ResourceRevMap == nil { cr.Status.ResourceRevMap = make(map[string]string) } // validate and update defaults for the CR - err := validateStandaloneSpec(&cr.Spec) + err := validateStandaloneSpec(cr) if err != nil { + scopedLog.Error(err, "Failed to validate standalone spec") return result, err } @@ -67,9 +69,22 @@ func ApplyStandalone(client splcommon.ControllerClient, cr *enterprisev1.Standal cr.Status.SmartStore = cr.Spec.SmartStore } + // If the app framework is configured then do the following things - + // 1. Initialize the S3Clients based on providers + // 2. Check the status of apps on remote storage. + if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 { + err := initAndCheckAppInfoStatus(client, cr, &cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + if err != nil { + return result, err + } + } + cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-standalone", cr.GetName()) defer func() { client.Status().Update(context.TODO(), cr) + if err != nil { + scopedLog.Error(err, "Status update failed") + } }() // create or update general config resources @@ -108,6 +123,35 @@ func ApplyStandalone(client splcommon.ControllerClient, cr *enterprisev1.Standal return result, err } + // If we are using appFramework and are scaling up, we should re-populate the + // configMap with all the appSource entries. This is done so that the new pods + // that come up now will have the complete list of all the apps and then can + // download and install all the apps. + // TODO: Improve this logic so that we only recycle the new pod/replica + // and not all the existing pods.
+ if len(cr.Spec.AppFrameworkConfig.AppSources) != 0 && cr.Spec.Replicas > 1 { + + statefulsetName := GetSplunkStatefulsetName(SplunkStandalone, cr.GetName()) + + isScalingUp, err := splctrl.IsStatefulSetScalingUp(client, cr, statefulsetName, cr.Spec.Replicas) + if err != nil { + return result, err + } else if isScalingUp { + // if we are indeed scaling up, then mark the deploy status to Pending + // for all the app sources so that we add all the app sources in configMap. + appStatusContext := cr.Status.AppContext + for appSrc := range appStatusContext.AppsSrcDeployStatus { + changeAppSrcDeployInfoStatus(appSrc, appStatusContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusComplete, enterpriseApi.DeployStatusPending) + } + + // Now apply the configMap with full app listing. + _, _, err = ApplyAppListingConfigMap(client, cr, &cr.Spec.AppFrameworkConfig, appStatusContext.AppsSrcDeployStatus) + if err != nil { + return result, err + } + } + } + // create or update statefulset statefulSet, err := getStandaloneStatefulSet(client, cr) if err != nil { @@ -124,42 +168,64 @@ func ApplyStandalone(client splcommon.ControllerClient, cr *enterprisev1.Standal // no need to requeue if everything is ready if cr.Status.Phase == splcommon.PhaseReady { + if cr.Status.AppContext.AppsSrcDeployStatus != nil { + markAppsStatusToComplete(cr.Status.AppContext.AppsSrcDeployStatus) + } + err = ApplyMonitoringConsole(client, cr, cr.Spec.CommonSplunkSpec, getStandaloneExtraEnv(cr, cr.Spec.Replicas)) if err != nil { return result, err } - result.Requeue = false + + // Requeue the reconcile after polling interval if we had set the lastAppInfoCheckTime. + if cr.Status.AppContext.LastAppInfoCheckTime != 0 { + result.RequeueAfter = GetNextRequeueTime(cr.Status.AppContext.AppsRepoStatusPollInterval, cr.Status.AppContext.LastAppInfoCheckTime) + } else { + result.Requeue = false + } } return result, nil } // getStandaloneStatefulSet returns a Kubernetes StatefulSet object for Splunk Enterprise standalone instances. -func getStandaloneStatefulSet(client splcommon.ControllerClient, cr *enterprisev1.Standalone) (*appsv1.StatefulSet, error) { +func getStandaloneStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.Standalone) (*appsv1.StatefulSet, error) { // get generic statefulset for Splunk Enterprise objects ss, err := getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkStandalone, cr.Spec.Replicas, []corev1.EnvVar{}) if err != nil { return nil, err } - _, needToSetupSplunkOperatorApp := getSmartstoreConfigMap(client, cr, SplunkStandalone) + smartStoreConfigMap := getSmartstoreConfigMap(client, cr, SplunkStandalone) - if needToSetupSplunkOperatorApp { + if smartStoreConfigMap != nil { setupInitContainer(&ss.Spec.Template, cr.Spec.Image, cr.Spec.ImagePullPolicy, commandForStandaloneSmartstore) } + // Setup App framework init containers + setupAppInitContainers(client, cr, &ss.Spec.Template, &cr.Spec.AppFrameworkConfig) + return ss, nil }
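Reviewer note: `splctrl.IsStatefulSetScalingUp`, used by the scaling-up branch of `ApplyStandalone` above, is another helper this diff does not show. Conceptually it only needs to compare the desired replica count against the replicas on the StatefulSet currently in the cluster; a sketch under that assumption (the real helper, its name, and its error handling live in pkg/splunk/controller):

```go
package controller

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/types"

	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
)

// isStatefulSetScalingUpSketch reports whether the CR now asks for more
// replicas than the existing StatefulSet currently declares.
func isStatefulSetScalingUpSketch(c splcommon.ControllerClient, cr splcommon.MetaObject, name string, desiredReplicas int32) (bool, error) {
	var current appsv1.StatefulSet
	key := types.NamespacedName{Namespace: cr.GetNamespace(), Name: name}
	if err := c.Get(context.TODO(), key, &current); err != nil {
		// No readable existing statefulset: we cannot detect a scale-up.
		return false, err
	}

	currentReplicas := int32(1)
	if current.Spec.Replicas != nil {
		currentReplicas = *current.Spec.Replicas
	}
	return desiredReplicas > currentReplicas, nil
}
```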
// validateStandaloneSpec checks validity and makes default updates to a StandaloneSpec, and returns error if something is wrong. -func validateStandaloneSpec(spec *enterprisev1.StandaloneSpec) error { - if spec.Replicas == 0 { - spec.Replicas = 1 +func validateStandaloneSpec(cr *enterpriseApi.Standalone) error { + if cr.Spec.Replicas == 0 { + cr.Spec.Replicas = 1 } - err := ValidateSplunkSmartstoreSpec(&spec.SmartStore) - if err != nil { - return err + if !reflect.DeepEqual(cr.Status.SmartStore, cr.Spec.SmartStore) { + err := ValidateSplunkSmartstoreSpec(&cr.Spec.SmartStore) + if err != nil { + return err + } + } + + if !reflect.DeepEqual(cr.Status.AppContext.AppFrameworkConfig, cr.Spec.AppFrameworkConfig) { + err := ValidateAppFrameworkSpec(&cr.Spec.AppFrameworkConfig, &cr.Status.AppContext, true) + if err != nil { + return err + } } - return validateCommonSplunkSpec(&spec.CommonSplunkSpec) + return validateCommonSplunkSpec(&cr.Spec.CommonSplunkSpec) } diff --git a/pkg/splunk/enterprise/standalone_test.go b/pkg/splunk/enterprise/standalone_test.go index ad10f47a6..0df1c88bd 100644 --- a/pkg/splunk/enterprise/standalone_test.go +++ b/pkg/splunk/enterprise/standalone_test.go @@ -22,7 +22,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -38,6 +39,7 @@ func TestApplyStandalone(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-standalone-secret-v1"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-standalone-smartstore"}, + {MetaName: "*v1.ConfigMap-test-splunk-stack1-standalone-app-list"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-standalone-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-standalone"}, } @@ -52,9 +54,9 @@ func TestApplyStandalone(t *testing.T) { listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[5], funcCalls[8]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8]}, "Update": {funcCalls[8]}, "List": {listmockCall[0]}} - current := enterprisev1.Standalone{ + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2], funcCalls[3], funcCalls[5], funcCalls[9]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Update": {funcCalls[9]}, "List": {listmockCall[0]}} + current := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -66,7 +68,7 @@ func TestApplyStandalone(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyStandalone(c, cr.(*enterprisev1.Standalone)) + _, err := ApplyStandalone(c, cr.(*enterpriseApi.Standalone)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyStandalone", &current, revised, createCalls, updateCalls, reconcile, true) @@ -76,7 +78,7 @@ func TestApplyStandalone(t
*testing.T) { revised.ObjectMeta.DeletionTimestamp = &currentTime revised.ObjectMeta.Finalizers = []string{"enterprise.splunk.com/delete-pvc"} deleteFunc := func(cr splcommon.MetaObject, c splcommon.ControllerClient) (bool, error) { - _, err := ApplyStandalone(c, cr.(*enterprisev1.Standalone)) + _, err := ApplyStandalone(c, cr.(*enterpriseApi.Standalone)) return true, err } splunkDeletionTester(t, revised, deleteFunc) @@ -95,6 +97,7 @@ func TestApplyStandaloneWithSmartstore(t *testing.T) { {MetaName: "*v1.Secret-test-splunk-test-secret"}, {MetaName: "*v1.Secret-test-splunk-stack1-standalone-secret-v1"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-standalone-smartstore"}, + {MetaName: "*v1.ConfigMap-test-splunk-stack1-standalone-app-list"}, {MetaName: "*v1.ConfigMap-test-splunk-stack1-standalone-smartstore"}, {MetaName: "*v1.StatefulSet-test-splunk-stack1-standalone"}, } @@ -109,10 +112,10 @@ func TestApplyStandaloneWithSmartstore(t *testing.T) { listmockCall := []spltest.MockFuncCall{ {ListOpts: listOpts}} - createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[2], funcCalls[6], funcCalls[7], funcCalls[9], funcCalls[12]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} - updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2], funcCalls[3], funcCalls[4], funcCalls[5], funcCalls[6], funcCalls[7], funcCalls[8], funcCalls[9], funcCalls[10], funcCalls[11], funcCalls[12]}, "Update": {funcCalls[11], funcCalls[12]}, "List": {listmockCall[0]}} + createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[2], funcCalls[6], funcCalls[7], funcCalls[9], funcCalls[13]}, "Update": {funcCalls[0]}, "List": {listmockCall[0]}} + updateCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Update": {funcCalls[12], funcCalls[13]}, "List": {listmockCall[0]}} - current := enterprisev1.Standalone{ + current := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -120,23 +123,23 @@ func TestApplyStandaloneWithSmartstore(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.StandaloneSpec{ + Spec: enterpriseApi.StandaloneSpec{ Replicas: 1, - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -168,14 +171,14 @@ func TestApplyStandaloneWithSmartstore(t *testing.T) { revised := current.DeepCopy() revised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - _, err := ApplyStandalone(c, cr.(*enterprisev1.Standalone)) + _, err := ApplyStandalone(c,
cr.(*enterpriseApi.Standalone)) return err } spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplyStandaloneWithSmartstore", &current, revised, createCalls, updateCalls, reconcile, true, secret) } func TestGetStandaloneStatefulSet(t *testing.T) { - cr := enterprisev1.Standalone{ + cr := enterpriseApi.Standalone{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -190,7 +193,7 @@ func TestGetStandaloneStatefulSet(t *testing.T) { test := func(want string) { f := func() (interface{}, error) { - if err := validateStandaloneSpec(&cr.Spec); err != nil { + if err := validateStandaloneSpec(&cr); err != nil { t.Errorf("validateStandaloneSpec() returned error: %v", err) } return getStandaloneStatefulSet(c, &cr) @@ -244,7 +247,7 @@ func TestGetStandaloneStatefulSet(t *testing.T) { } func TestApplyStandaloneSmartstoreKeyChangeDetection(t *testing.T) { - current := enterprisev1.Standalone{ + current := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -252,15 +255,15 @@ func TestApplyStandaloneSmartstoreKeyChangeDetection(t *testing.T) { Name: "stack1", Namespace: "test", }, - Spec: enterprisev1.StandaloneSpec{ + Spec: enterpriseApi.StandaloneSpec{ Replicas: 1, - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -302,3 +305,443 @@ func TestApplyStandaloneSmartstoreKeyChangeDetection(t *testing.T) { t.Errorf("Key change was not detected %v", err) } } + +func TestAppFrameworkApplyStandaloneShouldNotFail(t *testing.T) { + cr := enterpriseApi.Standalone{ + ObjectMeta: metav1.ObjectMeta{ + Name: "standalone", + Namespace: "test", + }, + Spec: enterpriseApi.StandaloneSpec{ + Replicas: 1, + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret", Type: "s3", Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + _, err = ApplyStandalone(client, &cr) + if err != nil { + t.Errorf("ApplyStandalone should be successful") + } +} + +func TestAppFrameworkApplyStandaloneScalingUpShouldNotFail(t *testing.T) { + cr := 
enterpriseApi.Standalone{ + ObjectMeta: metav1.ObjectMeta{ + Name: "standalone", + Namespace: "test", + }, + Spec: enterpriseApi.StandaloneSpec{ + Replicas: 1, + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret", Type: "s3", Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + _, err = ApplyStandalone(client, &cr) + if err != nil { + t.Errorf("ApplyStandalone should be successful") + } + + // now scale up + cr.Spec.Replicas = 2 + _, err = ApplyStandalone(client, &cr) + if err != nil { + t.Errorf("ApplyStandalone should be successful") + } +} + +func TestStandaloneGetAppsListForAWSS3ClientShouldNotFail(t *testing.T) { + cr := enterpriseApi.Standalone{ + ObjectMeta: metav1.ObjectMeta{ + Name: "standalone", + Namespace: "test", + }, + Spec: enterpriseApi.StandaloneSpec{ + Replicas: 1, + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + Defaults: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol2", + Scope: "local", + }, + VolList: []enterpriseApi.VolumeSpec{ + { + Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + { + Name: "msos_s2s3_vol2", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london2", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws", + }, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create S3 secret + s3Secret := spltest.GetMockS3SecretKeys("s3-secret") + + client.AddObject(&s3Secret) + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1", "5055a61b3d1b667a4c3279a381a2e7ae", "19779168370b97d8654424e6c9446dd9"} + Keys := []string{"admin_app.tgz", "security_app.tgz", "authentication_app.tgz"} + Sizes := []int64{10, 20, 30} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := 
spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[1], + Key: &Keys[1], + LastModified: &randomTime, + Size: &Sizes[1], + StorageClass: &StorageClass, + }, + }, + }, + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[2], + Key: &Keys[2], + LastModified: &randomTime, + Size: &Sizes[2], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cr.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + var allSuccess bool = true + for index, appSource := range appFrameworkRef.AppSources { + + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + allSuccess = false + continue + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{client: client, + cr: &cr, appFrameworkRef: &cr.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + cl := spltest.MockAWSS3Client{} + cl.Objects = mockAwsObjects[index].Objects + return cl + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + s3Response, err := s3ClientMgr.GetAppsList() + if err != nil { + allSuccess = false + continue + } + + var mockResponse spltest.MockAWSS3Client + mockResponse, err = splclient.ConvertS3Response(s3Response) + if err != nil { + allSuccess = false + continue + } + + if mockAwsHandler.GotSourceAppListResponseMap == nil { + mockAwsHandler.GotSourceAppListResponseMap = make(map[string]spltest.MockAWSS3Client) + } + + mockAwsHandler.GotSourceAppListResponseMap[appSource.Name] = mockResponse + } + + if allSuccess == false { + t.Errorf("Unable to get apps list for all the app sources") + } + method := "GetAppsList" + mockAwsHandler.CheckAWSS3Response(t, method) +} + +func TestStandaloneGetAppsListForAWSS3ClientShouldFail(t *testing.T) { + cr := enterpriseApi.Standalone{ + ObjectMeta: metav1.ObjectMeta{ + Name: "stack1", + Namespace: "test", + }, + Spec: enterpriseApi.StandaloneSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + // Create namespace scoped secret + _, err := splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf(err.Error()) + } + + splclient.RegisterS3Client("aws") + + Etags := []string{"cc707187b036405f095a8ebb43a782c1"} + Keys := 
[]string{"admin_app.tgz"} + Sizes := []int64{10} + StorageClass := "STANDARD" + randomTime := time.Date(2021, time.May, 1, 23, 23, 0, 0, time.UTC) + + mockAwsHandler := spltest.MockAWSS3Handler{} + + mockAwsObjects := []spltest.MockAWSS3Client{ + { + Objects: []*spltest.MockAWSS3Object{ + { + Etag: &Etags[0], + Key: &Keys[0], + LastModified: &randomTime, + Size: &Sizes[0], + StorageClass: &StorageClass, + }, + }, + }, + } + + appFrameworkRef := cr.Spec.AppFrameworkConfig + + mockAwsHandler.AddObjects(appFrameworkRef, mockAwsObjects...) + + var vol enterpriseApi.VolumeSpec + + appSource := appFrameworkRef.AppSources[0] + vol, err = splclient.GetAppSrcVolume(appSource, &appFrameworkRef) + if err != nil { + t.Errorf("Unable to get Volume due to error=%s", err) + } + + // Update the GetS3Client with our mock call which initializes mock AWS client + getClientWrapper := splclient.S3Clients[vol.Provider] + getClientWrapper.SetS3ClientFuncPtr(vol.Provider, splclient.NewMockAWSS3Client) + + s3ClientMgr := &S3ClientManager{ + client: client, + cr: &cr, + appFrameworkRef: &cr.Spec.AppFrameworkConfig, + vol: &vol, + location: appSource.Location, + initFn: func(region, accessKeyID, secretAccessKey string) interface{} { + // Purposefully return nil here so that we test the error scenario + return nil + }, + getS3Client: func(client splcommon.ControllerClient, cr splcommon.MetaObject, + appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, + location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + // Get the mock client + c, err := GetRemoteStorageClient(client, cr, appFrameworkRef, vol, location, fn) + return c, err + }, + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as there is no S3 secret provided") + } + + // Create empty S3 secret + s3Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "s3-secret", + Namespace: "test", + }, + Data: map[string][]byte{}, + } + + client.AddObject(&s3Secret) + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty keys") + } + + s3AccessKey := []byte{'1'} + s3Secret.Data = map[string][]byte{"s3_access_key": s3AccessKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_secret_key") + } + + s3SecretKey := []byte{'2'} + s3Secret.Data = map[string][]byte{"s3_secret_key": s3SecretKey} + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as S3 secret has empty s3_access_key") + } + + // Create S3 secret + s3Secret = spltest.GetMockS3SecretKeys("s3-secret") + + // This should return an error as we have initialized initFn for s3ClientMgr + // to return a nil client. 
+ _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as we could not get the S3 client") + } + + s3ClientMgr.initFn = func(region, accessKeyID, secretAccessKey string) interface{} { + // To test the error scenario, do not set the Objects member yet + cl := spltest.MockAWSS3Client{} + return cl + } + + _, err = s3ClientMgr.GetAppsList() + if err == nil { + t.Errorf("GetAppsList should have returned error as we have empty objects in MockAWSS3Client") + } +} diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 6230ffb2a..60e3eac8a 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -16,14 +16,19 @@ package enterprise import ( "fmt" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" "time" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" logf "sigs.k8s.io/controller-runtime/pkg/log" - //"github.com/go-logr/stdr" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" @@ -32,8 +37,63 @@ import ( // kubernetes logger used by splunk.enterprise package var log = logf.Log.WithName("splunk.enterprise") +// GetRemoteStorageClient returns the corresponding S3Client +func GetRemoteStorageClient(client splcommon.ControllerClient, cr splcommon.MetaObject, appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, location string, fn splclient.GetInitFunc) (splclient.SplunkS3Client, error) { + + scopedLog := log.WithName("GetRemoteStorageClient").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + + s3Client := splclient.SplunkS3Client{} + // Use the provider name to get the corresponding function pointer + getClientWrapper := splclient.S3Clients[vol.Provider] + getClient := getClientWrapper.GetS3ClientFuncPtr() + + appSecretRef := vol.SecretRef + s3ClientSecret, err := splutil.GetSecretByName(client, cr, appSecretRef) + if err != nil { + return s3Client, err + } + + // Get access keys + accessKeyID := string(s3ClientSecret.Data["s3_access_key"]) + secretAccessKey := string(s3ClientSecret.Data["s3_secret_key"]) + + if accessKeyID == "" { + err = fmt.Errorf("accessKey missing") + return s3Client, err + } + if secretAccessKey == "" { + err = fmt.Errorf("S3 Secret Key is missing") + return s3Client, err + } + + // Get the bucket name from the "path" field + bucket := strings.Split(vol.Path, "/")[0] + + // Get the prefix from the "path" field + basePrefix := strings.TrimPrefix(vol.Path, bucket+"/") + // if vol.Path contains just the bucket name (i.e. without an ending "/"), TrimPrefix returns vol.Path unchanged + // So, just reset the basePrefix to empty + if basePrefix == bucket { + basePrefix = "" + } + + // Join takes care of merging two paths and returns a clean result + // Ex. 
("a/b" + "c"), ("a/b/" + "c"), ("a/b/" + "/c"), ("a/b/" + "/c"), ("a/b//", + "c/././") ("a/b/../b", + "c/../c") all are joined as "a/b/c" + prefix := filepath.Join(basePrefix, location) + "/" + + scopedLog.Info("Creating the client", "volume", vol.Name, "bucket", bucket, "bucket path", prefix) + + s3Client.Client, err = getClient(bucket, accessKeyID, secretAccessKey, prefix, prefix /* startAfter*/, vol.Endpoint, fn) + if err != nil { + scopedLog.Error(err, "Failed to get the S3 client") + return s3Client, err + } + + return s3Client, nil +} + // ApplySplunkConfig reconciles the state of Kubernetes Secrets, ConfigMaps and other general settings for Splunk Enterprise instances. -func ApplySplunkConfig(client splcommon.ControllerClient, cr splcommon.MetaObject, spec enterprisev1.CommonSplunkSpec, instanceType InstanceType) (*corev1.Secret, error) { +func ApplySplunkConfig(client splcommon.ControllerClient, cr splcommon.MetaObject, spec enterpriseApi.CommonSplunkSpec, instanceType InstanceType) (*corev1.Secret, error) { var err error // Creates/updates the namespace scoped "splunk-secrets" K8S secret object @@ -72,7 +132,7 @@ func getIndexerExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.EnvVar } // getClusterMasterExtraEnv returns extra environment variables used by indexer clusters -func getClusterMasterExtraEnv(cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec) []corev1.EnvVar { +func getClusterMasterExtraEnv(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec) []corev1.EnvVar { return []corev1.EnvVar{ { Name: "SPLUNK_CLUSTER_MASTER_URL", @@ -92,7 +152,7 @@ func getStandaloneExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.Env } // getLicenseMasterURL returns URL of license master -func getLicenseMasterURL(cr splcommon.MetaObject, spec *enterprisev1.CommonSplunkSpec) []corev1.EnvVar { +func getLicenseMasterURL(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec) []corev1.EnvVar { if spec.LicenseMasterRef.Name != "" { licenseMasterURL := GetSplunkServiceName(SplunkLicenseMaster, spec.LicenseMasterRef.Name, false) if spec.LicenseMasterRef.Namespace != "" { @@ -114,7 +174,7 @@ func getLicenseMasterURL(cr splcommon.MetaObject, spec *enterprisev1.CommonSplun } // getSearchHeadExtraEnv returns extra environment variables used by search head clusters -func getSearchHeadEnv(cr *enterprisev1.SearchHeadCluster) []corev1.EnvVar { +func getSearchHeadEnv(cr *enterpriseApi.SearchHeadCluster) []corev1.EnvVar { // get search head env variables with deployer env := getSearchHeadExtraEnv(cr, cr.Spec.Replicas) @@ -140,7 +200,7 @@ func getSearchHeadExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.Env } // GetSmartstoreRemoteVolumeSecrets is used to retrieve S3 access key and secrete keys. 
-func GetSmartstoreRemoteVolumeSecrets(volume enterprisev1.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterprisev1.SmartStoreSpec) (string, string, string, error) { +func GetSmartstoreRemoteVolumeSecrets(volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec) (string, string, string, error) { namespaceScopedSecret, err := splutil.GetSecretByName(client, cr, volume.SecretRef) if err != nil { return "", "", "", err @@ -160,9 +220,113 @@ func GetSmartstoreRemoteVolumeSecrets(volume enterprisev1.VolumeSpec, client spl return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil } +// ApplyAppListingConfigMap creates the configMap with two entries: +// (1) app-list-local.yaml +// (2) app-list-cluster.yaml +// Once the configMap is mounted on the Pod, Ansible handles the apps listed in these files +// ToDo: Deletes to be handled for phase-3 +func ApplyAppListingConfigMap(client splcommon.ControllerClient, cr splcommon.MetaObject, + appConf *enterpriseApi.AppFrameworkSpec, appsSrcDeployStatus map[string]enterpriseApi.AppSrcDeployInfo) (*corev1.ConfigMap, bool, error) { + + var err error + var crKind string + var configMapDataChanged bool + crKind = cr.GetObjectKind().GroupVersionKind().Kind + + scopedLog := log.WithName("ApplyAppListingConfigMap").WithValues("kind", crKind, "name", cr.GetName(), "namespace", cr.GetNamespace()) + + mapAppListing := make(map[string]string) + + // Locally scoped apps for CM/Deployer require the latest splunk-ansible with apps_location_local. Prior to this, + // there was no method to install local apps for these roles. If the apps_location_local variable is not available, + // it will be ignored and revert back to no locally scoped apps for CM/Deployer. + yamlConfIdcHeader := fmt.Sprintf(`splunk: + app_paths_install: + idxc:`) + + yamlConfShcHeader := fmt.Sprintf(`splunk: + app_paths_install: + shc:`) + + yamlConfLocalHeader := fmt.Sprintf(`splunk: + app_paths_install: + default:`) + + var localAppsConf, clusterAppsConf string + if crKind == "ClusterMaster" { + clusterAppsConf = yamlConfIdcHeader + } else if crKind == "SearchHeadCluster" { + clusterAppsConf = yamlConfShcHeader + } else { + clusterAppsConf = "" + } + + localAppsConf = yamlConfLocalHeader + + var mapKeys []string + + // Map order is not guaranteed, so use the sorted keys to go through the map entries + for appSrc := range appsSrcDeployStatus { + mapKeys = append(mapKeys, appSrc) + } + sort.Strings(mapKeys) + + for _, appSrc := range mapKeys { + appDeployList := appsSrcDeployStatus[appSrc].AppDeploymentInfoList + + switch scope := getAppSrcScope(appConf, appSrc); scope { + case "local": + for idx := range appDeployList { + if appDeployList[idx].DeployStatus == enterpriseApi.DeployStatusPending && + appDeployList[idx].RepoState == enterpriseApi.RepoStateActive { + localAppsConf = fmt.Sprintf(`%s + - "/init-apps/%s/%s"`, localAppsConf, appSrc, appDeployList[idx].AppName) + } + } + + case "cluster": + for idx := range appDeployList { + if appDeployList[idx].DeployStatus == enterpriseApi.DeployStatusPending && + appDeployList[idx].RepoState == enterpriseApi.RepoStateActive { + clusterAppsConf = fmt.Sprintf(`%s + - "/init-apps/%s/%s"`, clusterAppsConf, appSrc, appDeployList[idx].AppName) + } + } + + default: + scopedLog.Error(nil, "Invalid scope detected") + } + } + + // Don't update the configMap if there is nothing to write. 
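+	// A header-only string means no pending apps were appended for that scope above.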
+ if localAppsConf != yamlConfLocalHeader { + mapAppListing["app-list-local.yaml"] = localAppsConf + } + + if clusterAppsConf != yamlConfIdcHeader && clusterAppsConf != yamlConfShcHeader && clusterAppsConf != "" { + mapAppListing["app-list-cluster.yaml"] = clusterAppsConf + } + + // Create App list config map + configMapName := GetSplunkAppsConfigMapName(cr.GetName(), crKind) + appListingConfigMap := splctrl.PrepareConfigMap(configMapName, cr.GetNamespace(), mapAppListing) + + appListingConfigMap.SetOwnerReferences(append(appListingConfigMap.GetOwnerReferences(), splcommon.AsOwner(cr, true))) + + if len(appListingConfigMap.Data) > 0 { + configMapDataChanged, err = splctrl.ApplyConfigMap(client, appListingConfigMap) + + if err != nil { + return nil, configMapDataChanged, err + } + } + + return appListingConfigMap, configMapDataChanged, nil +} + // ApplySmartstoreConfigMap creates the configMap with Smartstore config in INI format func ApplySmartstoreConfigMap(client splcommon.ControllerClient, cr splcommon.MetaObject, - smartstore *enterprisev1.SmartStoreSpec) (*corev1.ConfigMap, bool, error) { + smartstore *enterpriseApi.SmartStoreSpec) (*corev1.ConfigMap, bool, error) { var crKind string var configMapDataChanged bool @@ -202,7 +366,8 @@ func ApplySmartstoreConfigMap(client splcommon.ControllerClient, cr splcommon.Me mapSplunkConfDetails["server.conf"] = iniServerConf // Create smartstore config consisting indexes.conf - SplunkOperatorAppConfigMap := prepareSplunkSmartstoreConfigMap(cr.GetName(), cr.GetNamespace(), crKind, mapSplunkConfDetails) + configMapName := GetSplunkSmartstoreConfigMapName(cr.GetName(), crKind) + SplunkOperatorAppConfigMap := splctrl.PrepareConfigMap(configMapName, cr.GetNamespace(), mapSplunkConfDetails) SplunkOperatorAppConfigMap.SetOwnerReferences(append(SplunkOperatorAppConfigMap.GetOwnerReferences(), splcommon.AsOwner(cr, true))) configMapDataChanged, err = splctrl.ApplyConfigMap(client, SplunkOperatorAppConfigMap) @@ -213,7 +378,6 @@ func ApplySmartstoreConfigMap(client splcommon.ControllerClient, cr splcommon.Me mapSplunkConfDetails[configToken] = fmt.Sprintf(`%d`, time.Now().Unix()) // Apply the configMap with a fresh token - SplunkOperatorAppConfigMap = prepareSplunkSmartstoreConfigMap(cr.GetName(), cr.GetNamespace(), crKind, mapSplunkConfDetails) configMapDataChanged, err = splctrl.ApplyConfigMap(client, SplunkOperatorAppConfigMap) if err != nil { return nil, configMapDataChanged, err @@ -251,7 +415,7 @@ func setupInitContainer(podTemplateSpec *corev1.PodTemplateSpec, Image string, i // DeleteOwnerReferencesForResources used to delete any outstanding owner references // Ideally we should be removing the owner reference wherever the CR is not controller for the resource -func DeleteOwnerReferencesForResources(client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterprisev1.SmartStoreSpec) error { +func DeleteOwnerReferencesForResources(client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec) error { var err error scopedLog := log.WithName("DeleteOwnerReferencesForResources").WithValues("kind", cr.GetObjectKind().GroupVersionKind().Kind, "name", cr.GetName(), "namespace", cr.GetNamespace()) @@ -272,7 +436,7 @@ func DeleteOwnerReferencesForResources(client splcommon.ControllerClient, cr spl // DeleteOwnerReferencesForS3SecretObjects deletes owner references for all the secret objects referred by smartstore // remote volume end points -func DeleteOwnerReferencesForS3SecretObjects(client 
splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterprisev1.SmartStoreSpec) error { +func DeleteOwnerReferencesForS3SecretObjects(client splcommon.ControllerClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec) error { scopedLog := log.WithName("DeleteOwnerReferencesForS3Secrets").WithValues("kind", cr.GetObjectKind().GroupVersionKind().Kind, "name", cr.GetName(), "namespace", cr.GetNamespace()) var err error = nil @@ -292,3 +456,521 @@ func DeleteOwnerReferencesForS3SecretObjects(client splcommon.ControllerClient, return err } + +// S3ClientManager is used to manage all the S3 storage clients and their connections. +type S3ClientManager struct { + client splcommon.ControllerClient + cr splcommon.MetaObject + appFrameworkRef *enterpriseApi.AppFrameworkSpec + vol *enterpriseApi.VolumeSpec + location string + initFn splclient.GetInitFunc + getS3Client func(client splcommon.ControllerClient, cr splcommon.MetaObject, + appFrameworkRef *enterpriseApi.AppFrameworkSpec, vol *enterpriseApi.VolumeSpec, + location string, fp splclient.GetInitFunc) (splclient.SplunkS3Client, error) +} + +// GetAppsList gets the apps list +func (s3mgr *S3ClientManager) GetAppsList() (splclient.S3Response, error) { + var s3Response splclient.S3Response + + c, err := s3mgr.getS3Client(s3mgr.client, s3mgr.cr, s3mgr.appFrameworkRef, s3mgr.vol, s3mgr.location, s3mgr.initFn) + if err != nil { + return s3Response, err + } + + s3Response, err = c.Client.GetAppsList() + if err != nil { + return s3Response, err + } + return s3Response, nil +} + +// GetAppListFromS3Bucket gets the list of apps from remote storage. +func GetAppListFromS3Bucket(client splcommon.ControllerClient, cr splcommon.MetaObject, appFrameworkRef *enterpriseApi.AppFrameworkSpec) (map[string]splclient.S3Response, error) { + + scopedLog := log.WithName("GetAppListFromS3Bucket").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + + sourceToAppListMap := make(map[string]splclient.S3Response) + + scopedLog.Info("Getting the list of apps from remote storage...") + + var s3Response splclient.S3Response + var vol enterpriseApi.VolumeSpec + var err error + var allSuccess bool = true + + for _, appSource := range appFrameworkRef.AppSources { + vol, err = splclient.GetAppSrcVolume(appSource, appFrameworkRef) + if err != nil { + allSuccess = false + continue + } + + s3ClientWrapper := splclient.S3Clients[vol.Provider] + initFunc := s3ClientWrapper.GetS3ClientInitFuncPtr() + s3ClientMgr := S3ClientManager{ + client: client, + cr: cr, + appFrameworkRef: appFrameworkRef, + vol: &vol, + location: appSource.Location, + initFn: initFunc, + getS3Client: GetRemoteStorageClient, + } + + // Now, get the apps list from remote storage + s3Response, err = s3ClientMgr.GetAppsList() + if err != nil { + // move on to the next appSource if we are not able to get apps list + scopedLog.Error(err, "Unable to get apps list", "appSource", appSource.Name) + allSuccess = false + continue + } + + sourceToAppListMap[appSource.Name] = s3Response + } + + if allSuccess == false { + err = fmt.Errorf("Unable to get apps list from remote storage list for all the apps") + } + + return sourceToAppListMap, err +} + +// checkIfAnAppIsActiveOnRemoteStore checks if the App is listed as part of the AppSrc listing +func checkIfAnAppIsActiveOnRemoteStore(appName string, list []*splclient.RemoteObject) bool { + for i := range list { + if strings.HasSuffix(*list[i].Key, appName) { + return true + } + } + + return false +} + +// 
checkIfAppSrcExistsWithRemoteListing checks if a given AppSrc is part of the remote listing +func checkIfAppSrcExistsWithRemoteListing(appSrc string, remoteObjListingMap map[string]splclient.S3Response) bool { + if _, ok := remoteObjListingMap[appSrc]; ok { + return true + } + + return false +} + +// changeAppSrcDeployInfoStatus sets the new status to all the apps in an AppSrc if the given repo state and deploy status match +// primarily used in Phase-3 +func changeAppSrcDeployInfoStatus(appSrc string, appSrcDeployStatus map[string]enterpriseApi.AppSrcDeployInfo, repoState enterpriseApi.AppRepoState, oldDeployStatus enterpriseApi.AppDeploymentStatus, newDeployStatus enterpriseApi.AppDeploymentStatus) { + scopedLog := log.WithName("changeAppSrcDeployInfoStatus").WithValues("Called for AppSource: ", appSrc, "repoState", repoState, "oldDeployStatus", oldDeployStatus, "newDeployStatus", newDeployStatus) + + if appSrcDeploymentInfo, ok := appSrcDeployStatus[appSrc]; ok { + appDeployInfoList := appSrcDeploymentInfo.AppDeploymentInfoList + for idx := range appDeployInfoList { + // Modify the app status if the state and status match + if appDeployInfoList[idx].RepoState == repoState && appDeployInfoList[idx].DeployStatus == oldDeployStatus { + appDeployInfoList[idx].DeployStatus = newDeployStatus + } + } + + // Update the Map entry again + appSrcDeployStatus[appSrc] = appSrcDeploymentInfo + scopedLog.Info("Complete") + } else { + // Ideally this should never happen, check if the "IsDeploymentInProgress" flag is handled correctly or not + scopedLog.Error(nil, "Could not find the App Source in App context") + } +} + +// setStateAndStatusForAppDeployInfo sets the state and status for an App +func setStateAndStatusForAppDeployInfo(appDeployInfo *enterpriseApi.AppDeploymentInfo, repoState enterpriseApi.AppRepoState, deployStatus enterpriseApi.AppDeploymentStatus) { + appDeployInfo.RepoState = repoState + appDeployInfo.DeployStatus = deployStatus +} + +// setStateAndStatusForAppDeployInfoList sets the state and status for a given list of Apps +func setStateAndStatusForAppDeployInfoList(appDeployList []enterpriseApi.AppDeploymentInfo, state enterpriseApi.AppRepoState, status enterpriseApi.AppDeploymentStatus) (bool, []enterpriseApi.AppDeploymentInfo) { + var modified bool + for idx := range appDeployList { + setStateAndStatusForAppDeployInfo(&appDeployList[idx], state, status) + modified = true + } + + return modified, appDeployList +} + +// handleAppRepoChanges parses the remote storage listing and updates the repoState and deployStatus accordingly +// client and cr will be used when we put the glue logic in place to hand off to the side car +func handleAppRepoChanges(client splcommon.ControllerClient, cr splcommon.MetaObject, + appDeployContext *enterpriseApi.AppDeploymentContext, remoteObjListingMap map[string]splclient.S3Response, appFrameworkConfig *enterpriseApi.AppFrameworkSpec) error { + crKind := cr.GetObjectKind().GroupVersionKind().Kind + scopedLog := log.WithName("handleAppRepoChanges").WithValues("kind", crKind, "name", cr.GetName(), "namespace", cr.GetNamespace()) + var err error + + scopedLog.Info("received App listing", "for App sources", len(remoteObjListingMap)) + if remoteObjListingMap == nil || len(remoteObjListingMap) == 0 { + scopedLog.Error(nil, "remoteObjectList is empty. 
Any apps that are already deployed will be disabled") + } + + // Check if the appSource is still valid in the config + for appSrc := range remoteObjListingMap { + if !CheckIfAppSrcExistsInConfig(appFrameworkConfig, appSrc) { + err = fmt.Errorf("App source: %s no longer exists, this should never happen", appSrc) + return err + } + } + + // ToDo: Ideally, this check should go to the reconcile entry point once the glue logic is in place. + if appDeployContext.AppsSrcDeployStatus == nil { + appDeployContext.AppsSrcDeployStatus = make(map[string]enterpriseApi.AppSrcDeployInfo) + } + + // 1. Check if the AppSrc is deleted in the latest config, OR missing from the remote listing. + for appSrc, appSrcDeploymentInfo := range appDeployContext.AppsSrcDeployStatus { + // If the AppSrc is missing, mark all the corresponding apps for deletion + if !CheckIfAppSrcExistsInConfig(appFrameworkConfig, appSrc) || + !checkIfAppSrcExistsWithRemoteListing(appSrc, remoteObjListingMap) { + scopedLog.Info("App change", "deleting/disabling all the apps for App source: ", appSrc, "Reason: App source is missing in config or remote listing") + curAppDeployList := appSrcDeploymentInfo.AppDeploymentInfoList + var modified bool + + modified, appSrcDeploymentInfo.AppDeploymentInfoList = setStateAndStatusForAppDeployInfoList(curAppDeployList, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending) + + if modified { + appDeployContext.IsDeploymentInProgress = true + // Finally update the Map entry with latest info + appDeployContext.AppsSrcDeployStatus[appSrc] = appSrcDeploymentInfo + } + } + } + + // 2. Go through each AppSrc from the remote listing + for appSrc, s3Response := range remoteObjListingMap { + // 2.1 Mark Apps for deletion if they are missing in remote listing + appSrcDeploymentInfo, appSrcExistsLocally := appDeployContext.AppsSrcDeployStatus[appSrc] + + if appSrcExistsLocally { + currentList := appSrcDeploymentInfo.AppDeploymentInfoList + for appIdx := range currentList { + if !checkIfAnAppIsActiveOnRemoteStore(currentList[appIdx].AppName, s3Response.Objects) { + scopedLog.Info("App change", "deleting/disabling the App: ", currentList[appIdx].AppName, "as it is missing in the remote listing", nil) + setStateAndStatusForAppDeployInfo(&currentList[appIdx], enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending) + appDeployContext.IsDeploymentInProgress = true + } + } + } + + // 2.2 Check for any App changes (Ex. 
A new App source, a new App added/updated) + if AddOrUpdateAppSrcDeploymentInfoList(&appSrcDeploymentInfo, s3Response.Objects) { + appDeployContext.IsDeploymentInProgress = true + } + + // Finally update the Map entry with latest info + appDeployContext.AppsSrcDeployStatus[appSrc] = appSrcDeploymentInfo + } + + return err +} + +// isAppExtentionValid checks if an app extension is supported or not +func isAppExtentionValid(receivedKey string) bool { + appExtIdx := strings.LastIndex(receivedKey, ".") + if appExtIdx < 0 { + return false + } + + switch appExt := receivedKey[appExtIdx+1:]; appExt { + case "spl": + return true + + case "tgz": + return true + + default: + return false + } +} + +// AddOrUpdateAppSrcDeploymentInfoList modifies the App deployment status as perceived from the remote object listing +func AddOrUpdateAppSrcDeploymentInfoList(appSrcDeploymentInfo *enterpriseApi.AppSrcDeployInfo, remoteS3ObjList []*splclient.RemoteObject) bool { + scopedLog := log.WithName("AddOrUpdateAppSrcDeploymentInfoList").WithValues("Called with length: ", len(remoteS3ObjList)) + + var found bool + var appName string + var newAppInfoList []enterpriseApi.AppDeploymentInfo + var appChangesDetected bool + var appDeployInfo enterpriseApi.AppDeploymentInfo + + for _, remoteObj := range remoteS3ObjList { + receivedKey := *remoteObj.Key + if !isAppExtentionValid(receivedKey) { + scopedLog.Error(nil, "App name Parsing: Ignoring the key: ", receivedKey, "with invalid extension") + continue + } + + nameAt := strings.LastIndex(receivedKey, "/") + appName = receivedKey[nameAt+1:] + + // Now update App status as seen in the remote listing + found = false + appList := appSrcDeploymentInfo.AppDeploymentInfoList + for idx := range appList { + if appList[idx].AppName == appName { + found = true + if appList[idx].ObjectHash != *remoteObj.Etag || appList[idx].RepoState == enterpriseApi.RepoStateDeleted { + scopedLog.Info("App change detected.", "App name: ", appName, "marking for an update") + appList[idx].ObjectHash = *remoteObj.Etag + appList[idx].DeployStatus = enterpriseApi.DeployStatusPending + + // Make the state active for an app that was deleted earlier, and got activated again + if appList[idx].RepoState == enterpriseApi.RepoStateDeleted { + scopedLog.Info("App change", "enabling the App name: ", appName, "that was previously disabled/deleted") + appList[idx].RepoState = enterpriseApi.RepoStateActive + } + appChangesDetected = true + } + + // Found the App and finished the needed work. We can break here + break + } + } + + // Update our local list if it is a new app + if !found { + scopedLog.Info("New App", "found: ", appName) + appDeployInfo.AppName = appName + appDeployInfo.ObjectHash = *remoteObj.Etag + appDeployInfo.RepoState = enterpriseApi.RepoStateActive + appDeployInfo.DeployStatus = enterpriseApi.DeployStatusPending + + // Add it to a separate list so that we don't loop through the newly added entries + newAppInfoList = append(newAppInfoList, appDeployInfo) + appChangesDetected = true + } + } + + // Add the newly discovered Apps to the App source group + appSrcDeploymentInfo.AppDeploymentInfoList = append(appSrcDeploymentInfo.AppDeploymentInfoList, newAppInfoList...) + + return appChangesDetected +} + +// markAppsStatusToComplete sets the required status for a given state. +// Gets called from glue logic based on how we want to hand off to the init/side car, and look for the return status +// For now, two possible cases: +// 1. Completing the changes for Deletes. 
Called with state=RepoStateDeleted, and status=DeployStatusPending +// 2. Completing the changes for Active (Apps newly added, apps modified, Apps previously deleted, and now active). +// Note: Used only for Phase-2 +func markAppsStatusToComplete(appSrcDeploymentStatus map[string]enterpriseApi.AppSrcDeployInfo) error { + var err error + scopedLog := log.WithName("markAppsStatusToComplete") + + // ToDo: Passing appSrcDeploymentStatus is redundant, but this function will go away in phase-3, so ok for now. + for appSrc := range appSrcDeploymentStatus { + changeAppSrcDeployInfoStatus(appSrc, appSrcDeploymentStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending, enterpriseApi.DeployStatusComplete) + changeAppSrcDeployInfoStatus(appSrc, appSrcDeploymentStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending, enterpriseApi.DeployStatusComplete) + } + + scopedLog.Info("Marked the App deployment status to complete") + // ToDo: sgontla: Caller of this API also needs to set "IsDeploymentInProgress = false" once after completing this function call for all the app sources + + return err +} + +// setupAppInitContainers creates the necessary shared volume and init containers to download all +// app packages in the appSources configured and make them locally available to the Splunk instance. +func setupAppInitContainers(client splcommon.ControllerClient, cr splcommon.MetaObject, podTemplateSpec *corev1.PodTemplateSpec, appFrameworkConfig *enterpriseApi.AppFrameworkSpec) { + scopedLog := log.WithName("setupAppInitContainers") + // Create shared volume and init containers for App Framework + if len(appFrameworkConfig.AppSources) > 0 { + // Create a volume to be shared between the init and Splunk containers to hold the downloaded apps + emptyVolumeSource := corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + } + + initVol := corev1.Volume{ + Name: appVolumeMntName, + VolumeSource: emptyVolumeSource, + } + + podTemplateSpec.Spec.Volumes = append(podTemplateSpec.Spec.Volumes, initVol) + + // Add init apps mount to Splunk container + initVolumeSpec := corev1.VolumeMount{ + Name: appVolumeMntName, + MountPath: appBktMnt, + } + + // This assumes the Splunk instance container is Containers[0], which I *believe* is valid + podTemplateSpec.Spec.Containers[0].VolumeMounts = append(podTemplateSpec.Spec.Containers[0].VolumeMounts, initVolumeSpec) + + // Add app framework init containers per app source and attach the init volume + for i, appSrc := range appFrameworkConfig.AppSources { + // Get volume info from appSrc + + var volSpecPos int + var err error + if appSrc.VolName != "" { + volSpecPos, err = splclient.CheckIfVolumeExists(appFrameworkConfig.VolList, appSrc.VolName) + } else { + volSpecPos, err = splclient.CheckIfVolumeExists(appFrameworkConfig.VolList, appFrameworkConfig.Defaults.VolName) + } + + if err != nil { + // Invalid appFramework config. 
This shouldn't happen + scopedLog.Info("Invalid appSrc volume spec, moving to the next one", "appSrc.VolName", appSrc.VolName, "err", err) + continue + } + appRepoVol := appFrameworkConfig.VolList[volSpecPos] + + s3ClientWrapper := splclient.S3Clients[appRepoVol.Provider] + initFunc := s3ClientWrapper.GetS3ClientInitFuncPtr() + // Use the provider name to get the corresponding function pointer + s3Client, err := GetRemoteStorageClient(client, cr, appFrameworkConfig, &appRepoVol, appSrc.Location, initFunc) + if err != nil { + // move on to the next appSource if we are not able to get the required client + scopedLog.Info("Invalid Remote Storage Client", "appRepoVol.Name", appRepoVol.Name, "err", err) + continue + } + + // Prepare app source/repo values + appBkt := appRepoVol.Path + appS3Endpoint := appRepoVol.Endpoint + appSecretRef := appRepoVol.SecretRef + appSrcName := appSrc.Name + appSrcPath := appSrc.Location + appSrcScope := getAppSrcScope(appFrameworkConfig, appSrc.Name) + initContainerName := strings.ToLower(fmt.Sprintf(initContainerTemplate, appSrcName, i, appSrcScope)) + + // Setup init container + initContainerSpec := corev1.Container{ + Image: s3Client.Client.GetInitContainerImage(), + ImagePullPolicy: "IfNotPresent", + Name: initContainerName, + Args: s3Client.Client.GetInitContainerCmd(appS3Endpoint, appBkt, appSrcPath, appSrcName, appBktMnt), + Env: []corev1.EnvVar{ + { + Name: "AWS_ACCESS_KEY_ID", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: appSecretRef, + }, + Key: s3AccessKey, + }, + }, + }, + { + Name: "AWS_SECRET_ACCESS_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: appSecretRef, + }, + Key: s3SecretKey, + }, + }, + }, + }, + } + + // Add mount to initContainer, same mount used for Splunk instance container as well + initContainerSpec.VolumeMounts = []corev1.VolumeMount{ + { + Name: appVolumeMntName, + MountPath: appBktMnt, + }, + } + podTemplateSpec.Spec.InitContainers = append(podTemplateSpec.Spec.InitContainers, initContainerSpec) + } + } +} + +// SetLastAppInfoCheckTime sets the last check time to current time +func SetLastAppInfoCheckTime(appInfoStatus *enterpriseApi.AppDeploymentContext) { + scopedLog := log.WithName("SetLastAppInfoCheckTime") + currentEpoch := time.Now().Unix() + + scopedLog.Info("Setting the LastAppInfoCheckTime to current time", "current epoch time", currentEpoch) + + appInfoStatus.LastAppInfoCheckTime = currentEpoch +} + +// HasAppRepoCheckTimerExpired checks if the polling interval has expired +func HasAppRepoCheckTimerExpired(appInfoContext *enterpriseApi.AppDeploymentContext) bool { + scopedLog := log.WithName("HasAppRepoCheckTimerExpired") + currentEpoch := time.Now().Unix() + + isTimerExpired := appInfoContext.LastAppInfoCheckTime+appInfoContext.AppsRepoStatusPollInterval <= currentEpoch + if isTimerExpired == true { + scopedLog.Info("App repo polling interval timer has expired", "LastAppInfoCheckTime", strconv.FormatInt(appInfoContext.LastAppInfoCheckTime, 10), "current epoch time", strconv.FormatInt(currentEpoch, 10)) + } + + return isTimerExpired +} + +// GetNextRequeueTime gets the next reconcile requeue time based on the appRepoPollInterval. +// There can be some time elapsed between when we first set lastAppInfoCheckTime and when the CR is in Ready state. 
+// Hence we need to subtract the delta time elapsed from the actual polling interval, +// so that the next reconcile would happen at the right time. +func GetNextRequeueTime(appRepoPollInterval, lastCheckTime int64) time.Duration { + scopedLog := log.WithName("GetNextRequeueTime") + currentEpoch := time.Now().Unix() + + var nextRequeueTimeInSec int64 + nextRequeueTimeInSec = appRepoPollInterval - (currentEpoch - lastCheckTime) + + scopedLog.Info("Getting next requeue time", "LastAppInfoCheckTime", lastCheckTime, "Current Epoch time", currentEpoch, "nextRequeueTimeInSec", nextRequeueTimeInSec) + + return time.Second * (time.Duration(nextRequeueTimeInSec)) +} + +// initAndCheckAppInfoStatus initializes the S3Clients and checks the status of apps on remote storage. +func initAndCheckAppInfoStatus(client splcommon.ControllerClient, cr splcommon.MetaObject, appFrameworkConf *enterpriseApi.AppFrameworkSpec, appStatusContext *enterpriseApi.AppDeploymentContext) error { + scopedLog := log.WithName("initAndCheckAppInfoStatus").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) + + var err error + // Register the S3 clients specific to providers if not done already + // This is done to prevent a null pointer dereference in the case when the + // operator crashes and comes back up and the status of the app context was updated + // to match the spec in the previous run. + initAppFrameWorkContext(appFrameworkConf, appStatusContext) + + // Check if the apps need to be downloaded from remote storage + if HasAppRepoCheckTimerExpired(appStatusContext) || !reflect.DeepEqual(appStatusContext.AppFrameworkConfig, *appFrameworkConf) { + var sourceToAppsList map[string]splclient.S3Response + + scopedLog.Info("Checking status of apps on remote storage...") + + sourceToAppsList, err = GetAppListFromS3Bucket(client, cr, appFrameworkConf) + // TODO: gaurav, we need to handle this case better in Phase-3. There can be a possibility + // where if an appSource is missing in remote store, we mark it for deletion. But if it comes up + // next time, we will recycle the pod to install the app. We need to find a way to reduce the pod recycles. 
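+		// A shorter list than the configured App sources means at least one remote
+		// listing call failed; in that case, skip the processing below and retry in the next reconcile.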
+ if len(sourceToAppsList) != len(appFrameworkConf.AppSources) { + scopedLog.Error(err, "Unable to get apps list, will retry in next reconcile...") + } else { + + for _, appSource := range appFrameworkConf.AppSources { + scopedLog.Info("Apps List retrieved from remote storage", "App Source", appSource.Name, "Content", sourceToAppsList[appSource.Name].Objects) + } + + // Only handle the app repo changes if we were able to successfully get the apps list + err = handleAppRepoChanges(client, cr, appStatusContext, sourceToAppsList, appFrameworkConf) + if err != nil { + scopedLog.Error(err, "Unable to use the App list retrieved from the remote storage") + return err + } + + _, _, err = ApplyAppListingConfigMap(client, cr, appFrameworkConf, appStatusContext.AppsSrcDeployStatus) + if err != nil { + return err + } + + appStatusContext.AppFrameworkConfig = *appFrameworkConf + } + + // set the last check time to current time + SetLastAppInfoCheckTime(appStatusContext) + } + + return nil +} diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 78b0df21e..ead6442b9 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -15,12 +15,16 @@ package enterprise import ( + "fmt" + "strconv" "testing" + "time" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + splclient "github.com/splunk/splunk-operator/pkg/splunk/client" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" @@ -38,7 +42,7 @@ func TestApplySplunkConfig(t *testing.T) { } createCalls := map[string][]spltest.MockFuncCall{"Get": funcCalls, "Create": {funcCalls[0], funcCalls[2]}, "Update": {funcCalls[0]}} updateCalls := map[string][]spltest.MockFuncCall{"Get": {funcCalls[0], funcCalls[1], funcCalls[2]}} - searchHeadCR := enterprisev1.SearchHeadCluster{ + searchHeadCR := enterpriseApi.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ Kind: "SearcHead", }, @@ -51,7 +55,7 @@ func TestApplySplunkConfig(t *testing.T) { searchHeadRevised := searchHeadCR.DeepCopy() searchHeadRevised.Spec.Image = "splunk/test" reconcile := func(c *spltest.MockClient, cr interface{}) error { - obj := cr.(*enterprisev1.SearchHeadCluster) + obj := cr.(*enterpriseApi.SearchHeadCluster) _, err := ApplySplunkConfig(c, obj, obj.Spec.CommonSplunkSpec, SplunkSearchHead) return err } @@ -62,7 +66,7 @@ func TestApplySplunkConfig(t *testing.T) { spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplySplunkConfig", &searchHeadCR, searchHeadRevised, createCalls, updateCalls, reconcile, false) // test indexer with license master - indexerCR := enterprisev1.IndexerCluster{ + indexerCR := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -75,7 +79,7 @@ func TestApplySplunkConfig(t *testing.T) { indexerRevised.Spec.Image = "splunk/test" indexerRevised.Spec.LicenseMasterRef.Name = "stack2" reconcile = func(c *spltest.MockClient, cr interface{}) error { - obj := cr.(*enterprisev1.IndexerCluster) + obj := cr.(*enterpriseApi.IndexerCluster) _, err := ApplySplunkConfig(c, obj, obj.Spec.CommonSplunkSpec, SplunkIndexer) return err } @@ -89,7 +93,7 @@ func TestApplySplunkConfig(t *testing.T) { } func TestGetLicenseMasterURL(t *testing.T) { - cr := enterprisev1.LicenseMaster{ + cr := 
enterpriseApi.LicenseMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "stack1", Namespace: "test", @@ -127,28 +131,28 @@ func TestGetLicenseMasterURL(t *testing.T) { } func TestApplySmartstoreConfigMap(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "idxCluster", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -171,7 +175,7 @@ func TestApplySmartstoreConfigMap(t *testing.T) { t.Errorf(err.Error()) } - test := func(client *spltest.MockClient, cr splcommon.MetaObject, smartstore *enterprisev1.SmartStoreSpec, want string) { + test := func(client *spltest.MockClient, cr splcommon.MetaObject, smartstore *enterpriseApi.SmartStoreSpec, want string) { f := func() (interface{}, error) { configMap, _, err := ApplySmartstoreConfigMap(client, cr, smartstore) configMap.Data["conftoken"] = "1601945361" @@ -180,7 +184,7 @@ func TestApplySmartstoreConfigMap(t *testing.T) { configTester(t, "ApplySmartstoreConfigMap()", f, want) } - test(client, &cr, &cr.Spec.SmartStore, `{"metadata":{"name":"splunk-idxCluster--smartstore","namespace":"test","creationTimestamp":null},"data":{"conftoken":"1601945361","indexes.conf":"[default]\nrepFactor = auto\nmaxDataSize = auto\nhomePath = $SPLUNK_DB/$_index_name/db\ncoldPath = $SPLUNK_DB/$_index_name/colddb\nthawedPath = $SPLUNK_DB/$_index_name/thaweddb\n \n[volume:msos_s2s3_vol]\nstorageType = remote\npath = s3://testbucket-rs-london\nremote.s3.access_key = abcdJDckRkxhMEdmSk5FekFRRzBFOXV6bGNldzJSWE9IenhVUy80aa\nremote.s3.secret_key = g4NVp0a29PTzlPdGczWk1vekVUcVBSa0o4NkhBWWMvR1NadDV4YVEy\nremote.s3.endpoint = https://s3-eu-west-2.amazonaws.com\n \n[salesdata1]\nremotePath = volume:msos_s2s3_vol/remotepath1\n\n[salesdata2]\nremotePath = volume:msos_s2s3_vol/remotepath2\n\n[salesdata3]\nremotePath = volume:msos_s2s3_vol/remotepath3\n","server.conf":""}}`) + test(client, &cr, &cr.Spec.SmartStore, `{"metadata":{"name":"splunk-idxCluster--smartstore","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"idxCluster","uid":"","controller":true}]},"data":{"conftoken":"1601945361","indexes.conf":"[default]\nrepFactor = auto\nmaxDataSize = auto\nhomePath = $SPLUNK_DB/$_index_name/db\ncoldPath = $SPLUNK_DB/$_index_name/colddb\nthawedPath = $SPLUNK_DB/$_index_name/thaweddb\n \n[volume:msos_s2s3_vol]\nstorageType = remote\npath = s3://testbucket-rs-london\nremote.s3.access_key = 
abcdJDckRkxhMEdmSk5FekFRRzBFOXV6bGNldzJSWE9IenhVUy80aa\nremote.s3.secret_key = g4NVp0a29PTzlPdGczWk1vekVUcVBSa0o4NkhBWWMvR1NadDV4YVEy\nremote.s3.endpoint = https://s3-eu-west-2.amazonaws.com\n \n[salesdata1]\nremotePath = volume:msos_s2s3_vol/remotepath1\n\n[salesdata2]\nremotePath = volume:msos_s2s3_vol/remotepath2\n\n[salesdata3]\nremotePath = volume:msos_s2s3_vol/remotepath3\n","server.conf":""}}`) // Missing Volume config should return an error cr.Spec.SmartStore.VolList = nil @@ -190,32 +194,132 @@ func TestApplySmartstoreConfigMap(t *testing.T) { } } +func TestApplyAppListingConfigMap(t *testing.T) { + cr := enterpriseApi.ClusterMaster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "clusterMaster", + //Name: "idxCluster", + Namespace: "test", + }, + Spec: enterpriseApi.ClusterMasterSpec{ + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", + Endpoint: "https://s3-eu-west-2.amazonaws.com", + Path: "testbucket-rs-london", + SecretRef: "s3-secret", + Type: "s3", + Provider: "aws"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + var S3Response splclient.S3Response + + remoteObjListMap := make(map[string]splclient.S3Response) + + // Fill appSrc adminApps + startAppPathAndName := "adminCategoryOne.tgz" + S3Response.Objects = createRemoteObjectList("b41d8cd98f00", startAppPathAndName, 2322, nil, 10) + remoteObjListMap[cr.Spec.AppFrameworkConfig.AppSources[0].Name] = S3Response + + startAppPathAndName = "securityCategoryOne.tgz" + S3Response.Objects = createRemoteObjectList("c41d8cd98f00", startAppPathAndName, 3322, nil, 10) + remoteObjListMap[cr.Spec.AppFrameworkConfig.AppSources[1].Name] = S3Response + + startAppPathAndName = "authenticationCategoryOne.tgz" + S3Response.Objects = createRemoteObjectList("d41d8cd98f00", startAppPathAndName, 4322, nil, 10) + remoteObjListMap[cr.Spec.AppFrameworkConfig.AppSources[2].Name] = S3Response + + // set the status context + initAppFrameWorkContext(&cr.Spec.AppFrameworkConfig, &cr.Status.AppContext) + + err := handleAppRepoChanges(client, &cr, &cr.Status.AppContext, remoteObjListMap, &cr.Spec.AppFrameworkConfig) + + if err != nil { + t.Errorf("Empty remote Object list should not trigger an error, but got error : %v", err) + } + + testAppListingConfigMap := func(client *spltest.MockClient, cr splcommon.MetaObject, appConf *enterpriseApi.AppFrameworkSpec, appsSrcDeployStatus map[string]enterpriseApi.AppSrcDeployInfo, want string) { + f := func() (interface{}, error) { + configMap, _, err := ApplyAppListingConfigMap(client, cr, appConf, appsSrcDeployStatus) + // Make the config token as predictable + configMap.Data[appsUpdateToken] = "1601945361" + return configMap, err + } + configTester(t, "(ApplyAppListingConfigMap)", f, want) + } + + testAppListingConfigMap(client, &cr, &cr.Spec.AppFrameworkConfig, cr.Status.AppContext.AppsSrcDeployStatus, 
`{"metadata":{"name":"splunk-clusterMaster--app-list","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"clusterMaster","uid":"","controller":true}]},"data":{"app-list-local.yaml":"splunk:\n app_paths_install:\n default:\n - \"/init-apps/adminApps/1_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/2_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/3_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/4_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/5_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/6_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/7_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/8_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/9_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/10_adminCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/1_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/2_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/3_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/4_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/5_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/6_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/7_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/8_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/9_authenticationCategoryOne.tgz\"\n - \"/init-apps/authenticationApps/10_authenticationCategoryOne.tgz\"\n - \"/init-apps/securityApps/1_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/2_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/3_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/4_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/5_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/6_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/7_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/8_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/9_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/10_securityCategoryOne.tgz\"","appsUpdateToken":"1601945361"}}`) + + // Now test the Cluster master stateful set, to validate the Pod updates with the app listing config map + _, err = splutil.ApplyNamespaceScopedSecretObject(client, "test") + if err != nil { + t.Errorf("Failed to create namespace scoped object") + } + + testStsWithAppListVolMounts := func(want string) { + f := func() (interface{}, error) { + if err := validateClusterMasterSpec(&cr); err != nil { + t.Errorf("validateClusterMasterSpec() returned error: %v", err) + } + return getClusterMasterStatefulSet(client, &cr) + } + configTester(t, fmt.Sprintf("getClusterMasterStatefulSet"), f, want) + } + + 
testStsWithAppListVolMounts(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-clusterMaster-cluster-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"clusterMaster","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-clusterMaster-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-clusterMaster-indexer"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-clusterMaster-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-clusterMaster-indexer"},"annotations":{"appListingRev":"","traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-clusterMaster-cluster-master-secret-v1","defaultMode":420}},{"name":"mnt-app-listing","configMap":{"name":"splunk-clusterMaster--app-list","items":[{"key":"app-list-local.yaml","path":"app-list-local.yaml","mode":420},{"key":"appsUpdateToken","path":"appsUpdateToken","mode":420}],"defaultMode":420}},{"name":"init-apps","emptyDir":{}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/app-listing/app-list-local.yaml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_cluster_master"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_CLUSTER_MASTER_URL","value":"localhost"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"},{"name":"mnt-app-listing","mountPath":"/mnt/app-listing/"},{"name":"init-apps","mountPath":"/init-apps/"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":455,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":165,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-clusterMaster-cluster-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-clusterMaster-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"sp
lunk-clusterMaster-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"indexer","app.kubernetes.io/instance":"splunk-clusterMaster-cluster-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"cluster-master","app.kubernetes.io/part-of":"splunk-clusterMaster-indexer"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-clusterMaster-cluster-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) +} + func TestRemoveOwenerReferencesForSecretObjectsReferredBySmartstoreVolumes(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "idxCluster", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, {Name: "msos_s2s3_vol_2", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, {Name: "msos_s2s3_vol_3", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, {Name: "msos_s2s3_vol_4", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, - IndexList: []enterprisev1.IndexSpec{ + IndexList: []enterpriseApi.IndexSpec{ {Name: "salesdata1", RemotePath: "remotepath1", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata2", RemotePath: "remotepath2", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, {Name: "salesdata3", RemotePath: "remotepath3", - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: "msos_s2s3_vol"}, }, }, @@ -252,14 +356,14 @@ func TestRemoveOwenerReferencesForSecretObjectsReferredBySmartstoreVolumes(t *te // If the secret object doesn't exist, should return an error // Here in the volume references, secrets splunk-test-sec_1, to splunk-test-sec_4 doesn't exist - cr = enterprisev1.ClusterMaster{ + cr = enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "idxCluster", Namespace: "testWithNoSecret", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-sec_1"}, {Name: "msos_s2s3_vol_2", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-sec_2"}, {Name: "msos_s2s3_vol_3", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: 
"splunk-test-sec_3"}, @@ -283,14 +387,14 @@ func TestRemoveOwenerReferencesForSecretObjectsReferredBySmartstoreVolumes(t *te } func TestGetSmartstoreRemoteVolumeSecrets(t *testing.T) { - cr := enterprisev1.ClusterMaster{ + cr := enterpriseApi.ClusterMaster{ ObjectMeta: metav1.ObjectMeta{ Name: "CM", Namespace: "test", }, - Spec: enterprisev1.ClusterMasterSpec{ - SmartStore: enterprisev1.SmartStoreSpec{ - VolList: []enterprisev1.VolumeSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + SmartStore: enterpriseApi.SmartStoreSpec{ + VolList: []enterpriseApi.VolumeSpec{ {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "splunk-test-secret"}, }, }, @@ -331,3 +435,345 @@ func TestGetSmartstoreRemoteVolumeSecrets(t *testing.T) { t.Errorf("Missing S3 Keys / Error not expected, when the Secret object with the S3 specific keys are present") } } + +func TestCheckIfAnAppIsActiveOnRemoteStore(t *testing.T) { + var remoteObjList []*splclient.RemoteObject + var entry *splclient.RemoteObject + + tmpAppName := "xys.spl" + entry = allocateRemoteObject("d41d8cd98f00", tmpAppName, 2322, nil) + + remoteObjList = append(remoteObjList, entry) + + if !checkIfAnAppIsActiveOnRemoteStore(tmpAppName, remoteObjList) { + t.Errorf("Failed to detect for a valid app from remote listing") + } + + if checkIfAnAppIsActiveOnRemoteStore("app10.tgz", remoteObjList) { + t.Errorf("Non existing app is reported as existing") + } + +} + +func TestHandleAppRepoChanges(t *testing.T) { + cr := enterpriseApi.Standalone{ + ObjectMeta: metav1.ObjectMeta{ + Name: "Clustermaster", + Namespace: "test", + }, + Spec: enterpriseApi.StandaloneSpec{ + Replicas: 1, + AppFrameworkConfig: enterpriseApi.AppFrameworkSpec{ + VolList: []enterpriseApi.VolumeSpec{ + {Name: "msos_s2s3_vol", Endpoint: "https://s3-eu-west-2.amazonaws.com", Path: "testbucket-rs-london", SecretRef: "s3-secret"}, + }, + AppSources: []enterpriseApi.AppSourceSpec{ + {Name: "adminApps", + Location: "adminAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "securityApps", + Location: "securityAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + {Name: "authenticationApps", + Location: "authenticationAppsRepo", + AppSourceDefaultSpec: enterpriseApi.AppSourceDefaultSpec{ + VolName: "msos_s2s3_vol", + Scope: "local"}, + }, + }, + }, + }, + } + + client := spltest.NewMockClient() + + var appDeployContext enterpriseApi.AppDeploymentContext + var remoteObjListMap map[string]splclient.S3Response + var appFramworkConf enterpriseApi.AppFrameworkSpec = cr.Spec.AppFrameworkConfig + var err error + + var S3Response splclient.S3Response + + // Test-1: Empty remoteObjectList Map should return an error + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + + if err != nil { + t.Errorf("Empty remote Object list should not trigger an error, but got error : %v", err) + } + + // Test-2: Valid remoteObjectList should not cause an error + startAppPathAndName := "bucketpath1/bpath2/locationpath1/lpath2/adminCategoryOne.tgz" + remoteObjListMap = make(map[string]splclient.S3Response) + // Prepare a S3Response + S3Response.Objects = createRemoteObjectList("d41d8cd98f00", startAppPathAndName, 2322, nil, 10) + // Set the app source with a matching one + remoteObjListMap[appFramworkConf.AppSources[0].Name] = S3Response + + err = handleAppRepoChanges(client, 
&cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + if err != nil { + t.Errorf("Could not handle a valid remote listing. Error: %v", err) + } + + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + if err != nil { + t.Errorf("Unexpected app status. Error: %v", err) + } + + // Test-3: If the App Resource is not found in the remote object listing, all the corresponding Apps should be deleted/disabled + delete(remoteObjListMap, appFramworkConf.AppSources[0].Name) + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + if err != nil { + t.Errorf("Could not handle a valid remote listing. Error: %v", err) + } + + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending) + if err != nil { + t.Errorf("Unable to delete/disable Apps, when the AppSource is deleted. Unexpected app status. Error: %v", err) + } + setStateAndStatusForAppDeployInfoList(appDeployContext.AppsSrcDeployStatus[appFramworkConf.AppSources[0].Name].AppDeploymentInfoList, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + + // Test-4: If the App Resource is not found in the config, all the corresponding Apps should be deleted/disabled + tmpAppSrcName := appFramworkConf.AppSources[0].Name + appFramworkConf.AppSources[0].Name = "invalidName" + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + if err != nil { + t.Errorf("Could not handle a valid remote listing. Error: %v", err) + } + appFramworkConf.AppSources[0].Name = tmpAppSrcName + + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending) + if err != nil { + t.Errorf("Unable to delete/disable Apps, when the AppSource is deleted from the config. Unexpected app status. Error: %v", err) + } + + // Test-5: Changing the AppSource deployment info should change for all the Apps in the list + changeAppSrcDeployInfoStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending, enterpriseApi.DeployStatusInProgress) + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusInProgress) + if err != nil { + t.Errorf("Invalid AppSrc deployment info detected. Error: %v", err) + } + + // Test-6: When an App is deleted on remote store, it should be marked as deleted + setStateAndStatusForAppDeployInfoList(appDeployContext.AppsSrcDeployStatus[appFramworkConf.AppSources[0].Name].AppDeploymentInfoList, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + + // delete an object on remote store for the app source + tmpS3Response := S3Response + tmpS3Response.Objects = append(tmpS3Response.Objects[:0], tmpS3Response.Objects[1:]...) + remoteObjListMap[appFramworkConf.AppSources[0].Name] = tmpS3Response + + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + if err != nil { + t.Errorf("Could not handle a valid remote listing. 
Error: %v", err) + } + + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + if err != nil { + t.Errorf("Unable to delete/disable an app when the App is deleted from remote store. Error: %v", err) + } + + // Test-7: Object hash change on the remote store should cause App state and status as Active and Pending. + S3Response.Objects = createRemoteObjectList("e41d8cd98f00", startAppPathAndName, 2322, nil, 10) + remoteObjListMap[appFramworkConf.AppSources[0].Name] = S3Response + + setStateAndStatusForAppDeployInfoList(appDeployContext.AppsSrcDeployStatus[appFramworkConf.AppSources[0].Name].AppDeploymentInfoList, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusComplete) + + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + if err != nil { + t.Errorf("Could not handle a valid remote listing. Error: %v", err) + } + + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + if err != nil { + t.Errorf("Unable to detect the change, when the object changed. Error: %v", err) + } + + // Test-8: For an AppSrc, when all the Apps are deleted on remote store and re-introduced, should modify the state to active and pending + setStateAndStatusForAppDeployInfoList(appDeployContext.AppsSrcDeployStatus[appFramworkConf.AppSources[0].Name].AppDeploymentInfoList, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusComplete) + + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + if err != nil { + t.Errorf("Could not handle a valid remote listing. Error: %v", err) + } + + _, err = validateAppSrcDeployInfoByStateAndStatus(appFramworkConf.AppSources[0].Name, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + if err != nil { + t.Errorf("Unable to delete/disable the Apps when the Apps are deleted from remote store. 
Error: %v", err) + } + + // Test-9: Unknown App source in remote obj listing should return an error + startAppPathAndName = "csecurityApps.spl" + S3Response.Objects = createRemoteObjectList("d41d8cd98f00", startAppPathAndName, 2322, nil, 10) + invalidAppSourceName := "UnknownAppSourceInConfig" + remoteObjListMap[invalidAppSourceName] = S3Response + err = handleAppRepoChanges(client, &cr, &appDeployContext, remoteObjListMap, &appFramworkConf) + + if err == nil { + t.Errorf("Unable to return an error, when the remote listing contain unknown App source") + } + delete(remoteObjListMap, invalidAppSourceName) + + // Test-10: Setting all apps in AppSrc to complete should mark all the apps status as complete irrespective of their state + // 10.1 Check for state=Active and status=Complete + for appSrc, appSrcDeployStatus := range appDeployContext.AppsSrcDeployStatus { + // ToDo: Enable for Phase-3 + //setStateAndStatusForAppDeployInfoList(appSrcDeployStatus.AppDeploymentInfoList, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusInProgress) + setStateAndStatusForAppDeployInfoList(appSrcDeployStatus.AppDeploymentInfoList, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + appDeployContext.AppsSrcDeployStatus[appSrc] = appSrcDeployStatus + + // ToDo: Enable for Phase-3 + //expectedMatchCount := getAppSrcDeployInfoCountByStateAndStatus(appSrc, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusInProgress) + expectedMatchCount := getAppSrcDeployInfoCountByStateAndStatus(appSrc, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusPending) + + markAppsStatusToComplete(appDeployContext.AppsSrcDeployStatus) + + matchCount, err := validateAppSrcDeployInfoByStateAndStatus(appSrc, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusComplete) + if err != nil { + t.Errorf("Unable to change the Apps status to complete, once the changes are reflecting on the Pod. Error: %v", err) + } + if expectedMatchCount != matchCount { + t.Errorf("App status change failed. Expected count %v, returned count %v", expectedMatchCount, matchCount) + } + } + + // 10.2 Check for state=Deleted status=Complete + for appSrc, appSrcDeployStatus := range appDeployContext.AppsSrcDeployStatus { + // ToDo: Enable for Phase-3 + //setStateAndStatusForAppDeployInfoList(appSrcDeployStatus.AppDeploymentInfoList, enterpriseApi.RepoStateActive, enterpriseApi.DeployStatusInProgress) + setStateAndStatusForAppDeployInfoList(appSrcDeployStatus.AppDeploymentInfoList, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending) + appDeployContext.AppsSrcDeployStatus[appSrc] = appSrcDeployStatus + + // ToDo: Enable for Phase-3 + //expectedMatchCount := getAppSrcDeployInfoCountByStateAndStatus(appSrc, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusInProgress) + expectedMatchCount := getAppSrcDeployInfoCountByStateAndStatus(appSrc, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusPending) + + markAppsStatusToComplete(appDeployContext.AppsSrcDeployStatus) + + matchCount, err := validateAppSrcDeployInfoByStateAndStatus(appSrc, appDeployContext.AppsSrcDeployStatus, enterpriseApi.RepoStateDeleted, enterpriseApi.DeployStatusComplete) + if err != nil { + t.Errorf("Unable to delete/disable an app when the App is deleted from remote store. 
Error: %v", err) + } + if expectedMatchCount != matchCount { + t.Errorf("App status change failed. Expected count %v, returned count %v", expectedMatchCount, matchCount) + } + } +} + +func TestIsAppExtentionValid(t *testing.T) { + if !isAppExtentionValid("testapp.spl") || !isAppExtentionValid("testapp.tgz") { + t.Errorf("failed to detect valid app extension") + } + + if isAppExtentionValid("testapp.aspl") || isAppExtentionValid("testapp.ttgz") { + t.Errorf("failed to detect invalid app extension") + } +} + +func TestHasAppRepoCheckTimerExpired(t *testing.T) { + + // Case 1. This is the case when we first enter the reconcile loop. + appInfoContext := &enterpriseApi.AppDeploymentContext{ + LastAppInfoCheckTime: 0, + } + + if !HasAppRepoCheckTimerExpired(appInfoContext) { + t.Errorf("ShouldCheckAppStatus should have returned true") + } + + appInfoContext.AppsRepoStatusPollInterval = 60 + + // Case 2. We just checked the apps status + SetLastAppInfoCheckTime(appInfoContext) + + if HasAppRepoCheckTimerExpired(appInfoContext) { + t.Errorf("ShouldCheckAppStatus should have returned false since we just checked the apps status") + } + + // Case 3. Lets check after AppsRepoPollInterval has elapsed. + // We do this by setting some random past timestamp. + appInfoContext.LastAppInfoCheckTime = 1591464060 + + if !HasAppRepoCheckTimerExpired(appInfoContext) { + t.Errorf("ShouldCheckAppStatus should have returned true") + } +} + +func allocateRemoteObject(etag string, key string, Size int64, lastModified *time.Time) *splclient.RemoteObject { + var remoteObj splclient.RemoteObject + + remoteObj.Etag = &etag + remoteObj.Key = &key + remoteObj.Size = &Size + //tmpEntry.LastModified = lastModified + + return &remoteObj +} + +func createRemoteObjectList(etag string, key string, Size int64, lastModified *time.Time, count uint16) []*splclient.RemoteObject { + var remoteObjList []*splclient.RemoteObject + var remoteObj *splclient.RemoteObject + + for i := 1; i <= int(count); i++ { + tag := strconv.Itoa(i) + remoteObj = allocateRemoteObject(tag+etag, tag+"_"+key, Size+int64(i), nil) + remoteObjList = append(remoteObjList, remoteObj) + } + + return remoteObjList +} + +func validateAppSrcDeployInfoByStateAndStatus(appSrc string, appSrcDeployStatus map[string]enterpriseApi.AppSrcDeployInfo, repoState enterpriseApi.AppRepoState, deployStatus enterpriseApi.AppDeploymentStatus) (int, error) { + var matchCount int + if appSrcDeploymentInfo, ok := appSrcDeployStatus[appSrc]; ok { + appDeployInfoList := appSrcDeploymentInfo.AppDeploymentInfoList + for _, appDeployInfo := range appDeployInfoList { + // Check if the app status is as expected + if appDeployInfo.RepoState == repoState && appDeployInfo.DeployStatus != deployStatus { + return matchCount, fmt.Errorf("Invalid app status for appSrc %s, appName: %s", appSrc, appDeployInfo.AppName) + } + matchCount++ + } + } else { + return matchCount, fmt.Errorf("Missing app source %s, shouldn't not happen", appSrc) + } + + return matchCount, nil +} + +func getAppSrcDeployInfoCountByStateAndStatus(appSrc string, appSrcDeployStatus map[string]enterpriseApi.AppSrcDeployInfo, repoState enterpriseApi.AppRepoState, deployStatus enterpriseApi.AppDeploymentStatus) int { + var matchCount int + if appSrcDeploymentInfo, ok := appSrcDeployStatus[appSrc]; ok { + appDeployInfoList := appSrcDeploymentInfo.AppDeploymentInfoList + for _, appDeployInfo := range appDeployInfoList { + // Check if the app status is as expected + if appDeployInfo.RepoState == repoState && appDeployInfo.DeployStatus == 
deployStatus { + matchCount++ + } + } + } + + return matchCount +} + +func TestSetLastAppInfoCheckTime(t *testing.T) { + appInfoStatus := &enterpriseApi.AppDeploymentContext{} + SetLastAppInfoCheckTime(appInfoStatus) + + if appInfoStatus.LastAppInfoCheckTime != time.Now().Unix() { + t.Errorf("LastAppInfoCheckTime should have been set to current time") + } +} + +func TestGetNextRequeueTime(t *testing.T) { + appFrameworkContext := enterpriseApi.AppDeploymentContext{} + appFrameworkContext.AppsRepoStatusPollInterval = 60 + nextRequeueTime := GetNextRequeueTime(appFrameworkContext.AppsRepoStatusPollInterval, (time.Now().Unix() - int64(40))) + if nextRequeueTime > time.Second*20 { + t.Errorf("Got wrong next requeue time") + } +} diff --git a/pkg/splunk/test/awss3client.go b/pkg/splunk/test/awss3client.go new file mode 100644 index 000000000..192e2cbc9 --- /dev/null +++ b/pkg/splunk/test/awss3client.go @@ -0,0 +1,103 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package test + +import ( + "encoding/json" + "reflect" + "testing" + "time" + + "github.com/aws/aws-sdk-go/service/s3" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" +) + +// MockAWSS3Object struct contains contents returned as part of S3 response +type MockAWSS3Object struct { + Etag *string + Key *string + LastModified *time.Time + Size *int64 + StorageClass *string +} + +// MockAWSS3Client is used to store all the objects for an app source +type MockAWSS3Client struct { + Objects []*MockAWSS3Object +} + +// MockAWSS3Handler is used for checking response received +type MockAWSS3Handler struct { + WantSourceAppListResponseMap map[string]MockAWSS3Client + GotSourceAppListResponseMap map[string]MockAWSS3Client +} + +// AddObjects adds mock AWS S3 Objects to handler +func (c *MockAWSS3Handler) AddObjects(appFrameworkRef enterpriseApi.AppFrameworkSpec, objects ...MockAWSS3Client) { + for n := range objects { + mockAWSS3Client := objects[n] + appSource := appFrameworkRef.AppSources[n] + if c.WantSourceAppListResponseMap == nil { + c.WantSourceAppListResponseMap = make(map[string]MockAWSS3Client) + } + c.WantSourceAppListResponseMap[appSource.Name] = mockAWSS3Client + } +} + +// CheckAWSS3Response checks if the received objects are same as the one we expect +func (c *MockAWSS3Handler) CheckAWSS3Response(t *testing.T, testMethod string) { + if len(c.WantSourceAppListResponseMap) != len(c.GotSourceAppListResponseMap) { + t.Fatalf("%s got %d Responses; want %d", testMethod, len(c.GotSourceAppListResponseMap), len(c.WantSourceAppListResponseMap)) + } + for appSourceName, gotObjects := range c.GotSourceAppListResponseMap { + wantObjects := c.WantSourceAppListResponseMap[appSourceName] + if !reflect.DeepEqual(gotObjects.Objects, wantObjects.Objects) { + for n, gotObject := range gotObjects.Objects { + if *gotObject.Etag != *wantObjects.Objects[n].Etag { + t.Errorf("%s GotResponse[%s] Etag=%s; want %s", testMethod, appSourceName, *gotObject.Etag, 
*wantObjects.Objects[n].Etag) + } + if *gotObject.Key != *wantObjects.Objects[n].Key { + t.Errorf("%s GotResponse[%s] Key=%s; want %s", testMethod, appSourceName, *gotObject.Key, *wantObjects.Objects[n].Key) + } + if *gotObject.StorageClass != *wantObjects.Objects[n].StorageClass { + t.Errorf("%s GotResponse[%s] StorageClass=%s; want %s", testMethod, appSourceName, *gotObject.StorageClass, *wantObjects.Objects[n].StorageClass) + } + if *gotObject.Size != *wantObjects.Objects[n].Size { + t.Errorf("%s GotResponse[%s] Size=%d; want %d", testMethod, appSourceName, *gotObject.Size, *wantObjects.Objects[n].Size) + } + if *gotObject.LastModified != *wantObjects.Objects[n].LastModified { + t.Errorf("%s GotResponse[%s] LastModified=%s; want %s", testMethod, appSourceName, gotObject.LastModified.String(), wantObjects.Objects[n].LastModified.String()) + } + } + } + } +} + +// ListObjectsV2 is a mock call to ListObjectsV2 +func (mockClient MockAWSS3Client) ListObjectsV2(options *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { + output := &s3.ListObjectsV2Output{} + + tmp, err := json.Marshal(mockClient.Objects) + if err != nil { + return nil, err + } + + err = json.Unmarshal(tmp, &output.Contents) + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go index 29d23fbd4..9e566def3 100644 --- a/pkg/splunk/test/controller.go +++ b/pkg/splunk/test/controller.go @@ -21,7 +21,7 @@ import ( "reflect" "testing" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,16 +47,16 @@ func enterpriseObjCopier(dst, src *runtime.Object) bool { dstP := *dst srcP := *src switch srcP.(type) { - case *enterprisev1.ClusterMaster: - *dstP.(*enterprisev1.ClusterMaster) = *srcP.(*enterprisev1.ClusterMaster) - case *enterprisev1.IndexerCluster: - *dstP.(*enterprisev1.IndexerCluster) = *srcP.(*enterprisev1.IndexerCluster) - case *enterprisev1.LicenseMaster: - *dstP.(*enterprisev1.LicenseMaster) = *srcP.(*enterprisev1.LicenseMaster) - case *enterprisev1.SearchHeadCluster: - *dstP.(*enterprisev1.SearchHeadCluster) = *srcP.(*enterprisev1.SearchHeadCluster) - case *enterprisev1.Standalone: - *dstP.(*enterprisev1.Standalone) = *srcP.(*enterprisev1.Standalone) + case *enterpriseApi.ClusterMaster: + *dstP.(*enterpriseApi.ClusterMaster) = *srcP.(*enterpriseApi.ClusterMaster) + case *enterpriseApi.IndexerCluster: + *dstP.(*enterpriseApi.IndexerCluster) = *srcP.(*enterpriseApi.IndexerCluster) + case *enterpriseApi.LicenseMaster: + *dstP.(*enterpriseApi.LicenseMaster) = *srcP.(*enterpriseApi.LicenseMaster) + case *enterpriseApi.SearchHeadCluster: + *dstP.(*enterpriseApi.SearchHeadCluster) = *srcP.(*enterpriseApi.SearchHeadCluster) + case *enterpriseApi.Standalone: + *dstP.(*enterpriseApi.Standalone) = *srcP.(*enterpriseApi.Standalone) default: return false } diff --git a/pkg/splunk/test/util.go b/pkg/splunk/test/util.go new file mode 100644 index 000000000..ad8c96e00 --- /dev/null +++ b/pkg/splunk/test/util.go @@ -0,0 +1,44 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package test includes common code used for testing other modules. +This package has no dependencies outside of the standard go and kubernetes libraries, +and the splunk.common package. +*/ +package test + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetMockS3SecretKeys returns S3 secret keys +func GetMockS3SecretKeys(name string) corev1.Secret { + accessKey := []byte{'1'} + secretKey := []byte{'2'} + + // Create S3 secret + s3Secret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "test", + }, + Data: map[string][]byte{ + "s3_access_key": accessKey, + "s3_secret_key": secretKey, + }, + } + return s3Secret +} diff --git a/pkg/splunk/util/secrets.go b/pkg/splunk/util/secrets.go index 4b577b377..e2be6c6e1 100644 --- a/pkg/splunk/util/secrets.go +++ b/pkg/splunk/util/secrets.go @@ -478,6 +478,7 @@ func ApplyNamespaceScopedSecretObject(client splcommon.ControllerClient, namespa // GetSecretByName retrieves namespace scoped secret object for a given name func GetSecretByName(c splcommon.ControllerClient, cr splcommon.MetaObject, name string) (*corev1.Secret, error) { var namespaceScopedSecret corev1.Secret + scopedLog := log.WithName("GetSecretByName").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) // Check if a namespace scoped secret exists namespacedName := types.NamespacedName{Namespace: cr.GetNamespace(), Name: name} @@ -485,6 +486,7 @@ func GetSecretByName(c splcommon.ControllerClient, cr splcommon.MetaObject, name if err != nil { // Didn't find it + scopedLog.Error(err, "Unable to get secret", "secret name", name) return nil, err } diff --git a/test/c3/appframework/appframework_suite_test.go b/test/c3/appframework/appframework_suite_test.go new file mode 100644 index 000000000..c9f2c0adb --- /dev/null +++ b/test/c3/appframework/appframework_suite_test.go @@ -0,0 +1,98 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package c3appfw + +import ( + "os" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo/reporters" + . 
"github.com/onsi/gomega" + + "github.com/splunk/splunk-operator/test/testenv" +) + +const ( + // PollInterval specifies the polling interval + PollInterval = 5 * time.Second + + // ConsistentPollInterval is the interval to use to consistently check a state is stable + ConsistentPollInterval = 200 * time.Millisecond + ConsistentDuration = 2000 * time.Millisecond +) + +var ( + testenvInstance *testenv.TestEnv + testSuiteName = "c3appfw-" + testenv.RandomDNSName(3) + appListV1 []string + appListV2 []string + testDataS3Bucket = os.Getenv("TEST_BUCKET") + testS3Bucket = os.Getenv("TEST_INDEXES_S3_BUCKET") + s3AppDirV1 = "appframework/regressionappsv1/" + s3AppDirV2 = "appframework/regressionappsv2/" + currDir, _ = os.Getwd() + downloadDirV1 = filepath.Join(currDir, "c3appfwV1-"+testenv.RandomDNSName(4)) + downloadDirV2 = filepath.Join(currDir, "c3appfwV2-"+testenv.RandomDNSName(4)) +) + +// TestBasic is the main entry point +func TestBasic(t *testing.T) { + + RegisterFailHandler(Fail) + + junitReporter := reporters.NewJUnitReporter(testSuiteName + "_junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Running "+testSuiteName, []Reporter{junitReporter}) +} + +var _ = BeforeSuite(func() { + var err error + testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) + Expect(err).ToNot(HaveOccurred()) + + // Create a list of apps to upload to S3 + appListV1 = testenv.BasicApps + appFileList := testenv.GetAppFileList(appListV1, 1) + + // Download V1 Apps from S3 + err = testenv.DownloadFilesFromS3(testDataS3Bucket, s3AppDirV1, downloadDirV1, appFileList) + Expect(err).To(Succeed(), "Unable to download V1 app files") + + // Create a list of apps to upload to S3 after poll period + appListV2 = append(appListV1, testenv.NewAppsAddedBetweenPolls...) + appFileList = testenv.GetAppFileList(appListV2, 2) + + // Download V2 Apps from S3 + err = testenv.DownloadFilesFromS3(testDataS3Bucket, s3AppDirV2, downloadDirV2, appFileList) + Expect(err).To(Succeed(), "Unable to download V2 app files") + +}) + +var _ = AfterSuite(func() { + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } + + // Delete locally downloaded app files + err := os.RemoveAll(downloadDirV1) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V1 app files") + err = os.RemoveAll(downloadDirV2) + Expect(err).To(Succeed(), "Unable to delete locally downloaded V2 app files") +}) diff --git a/test/c3/appframework/appframework_test.go b/test/c3/appframework/appframework_test.go new file mode 100644 index 000000000..f2d2adff5 --- /dev/null +++ b/test/c3/appframework/appframework_test.go @@ -0,0 +1,426 @@ +// Copyright (c) 2018-2021 Splunk Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.s +package c3appfw + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + testenv "github.com/splunk/splunk-operator/test/testenv" + corev1 "k8s.io/api/core/v1" +) + +var _ = Describe("c3appfw test", func() { + + var deployment *testenv.Deployment + var s3TestDir string + var uploadedApps []string + + BeforeEach(func() { + var err error + deployment, err = testenvInstance.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + + // Upload V1 apps to S3 + s3TestDir = "c3appfw-" + testenv.RandomDNSName(4) + appFileList := testenv.GetAppFileList(appListV1, 1) + uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload apps to S3 test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) + + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. + if CurrentGinkgoTestDescription().Failed { + testenvInstance.SkipTeardown = true + } + if deployment != nil { + deployment.Teardown() + } + // Delete files uploaded to S3 + if !testenvInstance.SkipTeardown { + testenv.DeleteFilesOnS3(testS3Bucket, uploadedApps) + } + }) + + Context("Single Site Indexer Cluster with SHC (C3) with App Framework", func() { + It("integration, c3, appframework: can deploy a C3 SVA with App Framework enabled", func() { + + // Create App framework Spec + volumeName := "appframework-test-volume-" + testenv.RandomDNSName(3) + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testenvInstance.GetIndexSecretName(), "aws", "s3")} + + // AppSourceDefaultSpec: Remote Storage volume name and Scope of App deployment + appSourceDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: volumeName, + Scope: "cluster", + } + + // appSourceSpec: App source name, location and volume name and scope from appSourceDefaultSpec + appSourceName := "appframework" + testenv.RandomDNSName(3) + appSourceSpec := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceName, s3TestDir, appSourceDefaultSpec)} + + // appFrameworkSpec: AppSource settings, Poll Interval, volumes, appSources on volumes + appFrameworkSpec := enterpriseApi.AppFrameworkSpec{ + Defaults: appSourceDefaultSpec, + AppsRepoPollInterval: 60, + VolList: volumeSpec, + AppSources: appSourceSpec, + } + + indexerReplicas := 3 + + err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(deployment.GetName(), indexerReplicas, true, appFrameworkSpec, 10) + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with App framework") + + // Ensure that the cluster-master goes to Ready phase + testenv.ClusterMasterReady(deployment, testenvInstance) + + // Ensure indexers go to Ready phase + testenv.SingleSiteIndexersReady(deployment, testenvInstance) + + // Ensure search head cluster go to Ready phase + testenv.SearchHeadClusterReady(deployment, testenvInstance) + + // Verify MC Pod is Ready + testenv.MCPodReady(testenvInstance.GetName(), deployment) + + // Verify RF SF is met + testenv.VerifyRFSFMet(deployment, testenvInstance) + + // Verify Apps are downloaded by init-container + initContDownloadLocation := "/init-apps/" + appSourceName + podNames := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName()), fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + appFileList := 
testenv.GetAppFileList(appListV1, 1) + testenv.VerifyAppsDownloadedByInitContainer(deployment, testenvInstance, testenvInstance.GetName(), podNames, appFileList, initContDownloadLocation) + + //Verify Apps are copied to location + allPodNames := testenv.DumpGetPods(testenvInstance.GetName()) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), allPodNames, appListV1, true, true) + + // Verify apps are not copied in /etc/apps/ on CM and on Deployer (therefore not installed on Deployer and on CM) + masterPodNames := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName()), fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), masterPodNames, appListV1, false, false) + + //Verify Apps are installed + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), allPodNames, appListV1, true, "enabled", false, true) + + //Delete apps on S3 for new Apps + testenv.DeleteFilesOnS3(testS3Bucket, uploadedApps) + uploadedApps = nil + + //Upload new Versioned Apps to S3 + appFileList = testenv.GetAppFileList(appListV2, 2) + uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), "Unable to upload apps to S3 test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) + + // Wait for the poll period for the apps to be downloaded + time.Sleep(2 * time.Minute) + + // Ensure that the cluster-master goes to Ready phase + testenv.ClusterMasterReady(deployment, testenvInstance) + + // Ensure indexers go to Ready phase + testenv.SingleSiteIndexersReady(deployment, testenvInstance) + + // Ensure search head cluster go to Ready phase + testenv.SearchHeadClusterReady(deployment, testenvInstance) + + // Verify MC Pod is Ready + testenv.MCPodReady(testenvInstance.GetName(), deployment) + + // Verify RF SF is met + testenv.VerifyRFSFMet(deployment, testenvInstance) + + // Verify Apps are downloaded by init-container + testenv.VerifyAppsDownloadedByInitContainer(deployment, testenvInstance, testenvInstance.GetName(), podNames, appFileList, initContDownloadLocation) + + //Verify Apps are copied to location + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), allPodNames, appListV2, true, true) + + // Verify apps are not copied in /etc/apps/ on CM and on Deployer (therefore not installed on Deployer and on CM) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), masterPodNames, appListV2, false, false) + + //Verify Apps are updated + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), allPodNames, appListV2, true, "enabled", true, true) + + // Get instance of current SHC CR with latest config + shcName := deployment.GetName() + "-shc" + shc := &enterpriseApi.SearchHeadCluster{} + err = deployment.GetInstance(shcName, shc) + Expect(err).To(Succeed(), "Failed to get instance of Search Head Cluster") + + // Scale Search Head Cluster + defaultSHReplicas := shc.Spec.Replicas + scaledSHReplicas := defaultSHReplicas + 1 + testenvInstance.Log.Info("Scaling up Search Head Cluster", "Current Replicas", defaultSHReplicas, "New Replicas", scaledSHReplicas) + + // Update Replicas of SHC + shc.Spec.Replicas = int32(scaledSHReplicas) + err = deployment.UpdateCR(shc) + Expect(err).To(Succeed(), "Failed to scale Search Head Cluster") + + // Ensure Search Head cluster scales up and go to ScalingUp phase + 
testenv.VerifySearchHeadClusterPhase(deployment, testenvInstance, splcommon.PhaseScalingUp) + + // Get instance of current Indexer CR with latest config + idxcName := deployment.GetName() + "-idxc" + idxc := &enterpriseApi.IndexerCluster{} + err = deployment.GetInstance(idxcName, idxc) + Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") + + // Scale indexers + defaultIndexerReplicas := idxc.Spec.Replicas + scaledIndexerReplicas := defaultIndexerReplicas + 1 + testenvInstance.Log.Info("Scaling up Indexer Cluster", "Current Replicas", defaultIndexerReplicas, "New Replicas", scaledIndexerReplicas) + + // Update Replicas of Indexer Cluster + idxc.Spec.Replicas = int32(scaledIndexerReplicas) + err = deployment.UpdateCR(idxc) + Expect(err).To(Succeed(), "Failed to scale Indxer Cluster") + + // Ensure Indexer cluster scales up and go to ScalingUp phase + testenv.VerifyIndexerClusterPhase(deployment, testenvInstance, splcommon.PhaseScalingUp, idxcName) + + // Ensure Indexer cluster go to Ready phase + testenv.SingleSiteIndexersReady(deployment, testenvInstance) + + // Verify New Indexer On Cluster Master + indexerName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), scaledIndexerReplicas-1) + testenvInstance.Log.Info("Checking for Indexer On CM", "Indexer Name", indexerName) + Expect(testenv.CheckIndexerOnCM(deployment, indexerName)).To(Equal(true)) + + // Ensure search head cluster go to Ready phase + testenv.SearchHeadClusterReady(deployment, testenvInstance) + + // Verify MC Pod is Ready + testenv.MCPodReady(testenvInstance.GetName(), deployment) + + // Verify RF SF is met + testenv.VerifyRFSFMet(deployment, testenvInstance) + + // Verify Apps are copied to location + allPodNames = testenv.DumpGetPods(testenvInstance.GetName()) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), allPodNames, appListV2, true, true) + + // Verify apps are not copied in /etc/apps/ on CM and on Deployer (therefore not installed on Deployer and on CM) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), masterPodNames, appListV2, false, false) + + // Verify Apps are updated + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), allPodNames, appListV2, true, "enabled", true, true) + + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("smoke, c3, integration, appframework: can deploy a C3 SVA and have apps installed locally on CM and SHC Deployer", func() { + + // Create App framework Spec + // volumeSpec: Volume name, Endpoint, Path and SecretRef + volumeName := "appframework-test-volume-" + testenv.RandomDNSName(3) + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testenvInstance.GetIndexSecretName(), "aws", "s3")} + + // AppSourceDefaultSpec: Remote Storage volume name and Scope of App deployment + appSourceDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: volumeName, + Scope: "local", + } + + // appSourceSpec: App source name, location and volume name and scope from appSourceDefaultSpec + appSourceName := "appframework-" + testenv.RandomDNSName(3) + appSourceSpec := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceName, s3TestDir, appSourceDefaultSpec)} + + // appFrameworkSpec: AppSource settings, Poll Interval, volumes, appSources on volumes + appFrameworkSpec := enterpriseApi.AppFrameworkSpec{ + Defaults: appSourceDefaultSpec, + 
AppsRepoPollInterval: 60, + VolList: volumeSpec, + AppSources: appSourceSpec, + } + + // Create Single site Cluster and SHC, with App Framework enabled on CM and SHC Deployer + indexerReplicas := 3 + err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(deployment.GetName(), indexerReplicas, true, appFrameworkSpec, 10) + Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with App framework") + + // Ensure that the CM goes to Ready phase + testenv.ClusterMasterReady(deployment, testenvInstance) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(deployment, testenvInstance) + + // Ensure SHC go to Ready phase + testenv.SearchHeadClusterReady(deployment, testenvInstance) + + // Verify RF SF is met + testenv.VerifyRFSFMet(deployment, testenvInstance) + + // Verify Apps are downloaded by init-container + initContDownloadLocation := "/init-apps/" + appSourceName + podNames := []string{fmt.Sprintf(testenv.ClusterMasterPod, deployment.GetName()), fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + appFileList := testenv.GetAppFileList(appListV1, 1) + testenv.VerifyAppsDownloadedByInitContainer(deployment, testenvInstance, testenvInstance.GetName(), podNames, appFileList, initContDownloadLocation) + + // Verify apps are copied at the correct location on CM and on Deployer (/etc/apps/) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV1, true, false) + + // Verify apps are installed locally on CM and on SHC Deployer + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV1, false, "enabled", false, false) + + // Verify apps are not copied in /etc/master-apps/ on CM and /etc/shcluster/ on Deployer (therefore not installed on peers and on SH) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV1, false, true) + + //Delete apps on S3 for new Apps + testenv.DeleteFilesOnS3(testS3Bucket, uploadedApps) + uploadedApps = nil + + //Upload new Versioned Apps to S3 + appFileList = testenv.GetAppFileList(appListV2, 2) + uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV2) + Expect(err).To(Succeed(), "Unable to upload apps to S3 test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
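Aside on the two-minute wait that follows: AppsRepoPollInterval is set to 60 seconds in these specs, so sleeping for two intervals guarantees the operator at least one full remote-store poll plus slack for the download itself. Below is a minimal, self-contained sketch of the requeue arithmetic this relies on; the nextRequeue helper and its zero floor are illustrative assumptions for this sketch (not the operator's actual API), chosen to mirror the bound that the TestGetNextRequeueTime unit test earlier in this diff asserts.

package main

import (
	"fmt"
	"time"
)

// nextRequeue estimates how long to wait before the next app-repo poll:
// the poll interval minus the time elapsed since the last check.
// Flooring the result at zero is an assumption made for this sketch.
func nextRequeue(pollIntervalSeconds, lastCheckUnix int64) time.Duration {
	remaining := pollIntervalSeconds - (time.Now().Unix() - lastCheckUnix)
	if remaining < 0 {
		remaining = 0
	}
	return time.Duration(remaining) * time.Second
}

func main() {
	// 40 seconds into a 60-second interval leaves at most ~20 seconds,
	// the same bound TestGetNextRequeueTime checks against.
	fmt.Println(nextRequeue(60, time.Now().Unix()-40))
}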
+ + // Wait for the poll period for the apps to be downloaded + time.Sleep(2 * time.Minute) + + // Ensure that the CM goes to Ready phase + testenv.ClusterMasterReady(deployment, testenvInstance) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(deployment, testenvInstance) + + // Ensure SHC go to Ready phase + testenv.SearchHeadClusterReady(deployment, testenvInstance) + + // Verify RF SF is met + testenv.VerifyRFSFMet(deployment, testenvInstance) + + // Verify Apps are downloaded by init-container + testenv.VerifyAppsDownloadedByInitContainer(deployment, testenvInstance, testenvInstance.GetName(), podNames, appFileList, initContDownloadLocation) + + // Verify apps are copied at the correct location on CM and on Deployer (/etc/apps/) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV2, true, false) + + // Verify apps are installed locally on CM and on SHC Deployer + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV2, true, "enabled", true, false) + + // Verify apps are not copied in /etc/master-apps/ on CM and /etc/shcluster/ on Deployer (therefore not installed on peers and on SH) + testenv.VerifyAppsCopied(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV2, false, true) + }) + }) + + Context("Clustered deployment (C3 - clustered indexer, search head cluster)", func() { + It("integration, c3, appframework: can deploy a C3 SVA and have ES app installed on SHC", func() { + + // ES is a huge file, we configure it here rather than in BeforeSuite/BeforeEach to save time for other tests + // Upload ES app to S3 + esApp := []string{"SplunkEnterpriseSecuritySuite"} + appListV1 = append(appListV1, esApp...) + appFileList := testenv.GetAppFileList(appListV1, 1) + + // Download ES App from S3 + err := testenv.DownloadFilesFromS3(testDataS3Bucket, s3AppDirV1, downloadDirV1, testenv.GetAppFileList(esApp, 1)) + Expect(err).To(Succeed(), "Unable to download ES app file") + + // Upload ES app to S3 + uploadedFiles, err := testenv.UploadFilesToS3(testS3Bucket, s3TestDir, testenv.GetAppFileList(esApp, 1), downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload ES app to S3 test directory") + uploadedApps = append(uploadedApps, uploadedFiles...) 
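Once these packages land in the bucket, the operator's next poll has to classify them. The TestHandleAppRepoChanges cases earlier in this diff pin the expected transitions down: a new or etag-changed object becomes Active/Pending, and an object missing from the listing is marked Deleted. The following is a compact, illustrative sketch of that decision using simplified stand-in types, not the operator's actual structs or function names.

package main

import "fmt"

// appState is a simplified stand-in for the operator's per-app deployment info.
type appState struct {
	etag   string
	state  string // RepoState: "Active" or "Deleted"
	status string // DeployStatus: "Pending", "InProgress", "Complete"
}

// reconcileListing applies the transitions the unit tests exercise:
// new or etag-changed objects become Active/Pending; apps absent from
// the remote listing are marked Deleted.
func reconcileListing(known map[string]*appState, remote map[string]string) {
	for name, etag := range remote {
		if cur, ok := known[name]; !ok || cur.etag != etag {
			known[name] = &appState{etag: etag, state: "Active", status: "Pending"}
		}
	}
	for name, cur := range known {
		if _, ok := remote[name]; !ok {
			cur.state = "Deleted"
		}
	}
}

func main() {
	known := map[string]*appState{}
	reconcileListing(known, map[string]string{"app1.tgz": "etag-1"})
	fmt.Printf("%+v\n", *known["app1.tgz"]) // {etag:etag-1 state:Active status:Pending}
	reconcileListing(known, map[string]string{})
	fmt.Printf("%+v\n", *known["app1.tgz"]) // state flips to Deleted once absent
}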
+ + // Create App framework Spec + volumeName := "appframework-test-volume-" + testenv.RandomDNSName(3) + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateIndexVolumeSpec(volumeName, testenv.GetS3Endpoint(), testenvInstance.GetIndexSecretName(), "aws", "s3")} + + appSourceDefaultSpec := enterpriseApi.AppSourceDefaultSpec{ + VolName: volumeName, + Scope: "cluster", + } + appSourceName := "appframework-" + testenv.RandomDNSName(3) + appSourceSpec := []enterpriseApi.AppSourceSpec{testenv.GenerateAppSourceSpec(appSourceName, s3TestDir, appSourceDefaultSpec)} + appFrameworkSpec := enterpriseApi.AppFrameworkSpec{ + Defaults: appSourceDefaultSpec, + AppsRepoPollInterval: 60, + VolList: volumeSpec, + AppSources: appSourceSpec, + } + + // Create Single site Cluster and SHC, with App Framework enabled on SHC Deployer + // Deploy the CM + deployment.DeployClusterMaster(deployment.GetName(), "", "") + + // Deploy the indexer cluster + indexerReplicas := 3 + deployment.DeployIndexerCluster(deployment.GetName()+"-idxc", deployment.GetName(), indexerReplicas, deployment.GetName(), "") + + // Deploy the SHC + shSpec := enterpriseApi.SearchHeadClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "Always", + }, + ExtraEnv: []corev1.EnvVar{ + { + Name: "SPLUNK_ES_SSL_ENABLEMENT", + Value: "ignore"}, + }, + Volumes: []corev1.Volume{}, + ClusterMasterRef: corev1.ObjectReference{ + Name: deployment.GetName(), + }, + LivenessInitialDelaySeconds: 1450, + ReadinessInitialDelaySeconds: 1450, + }, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, + } + _, err = deployment.DeploySearchHeadClusterWithGivenSpec(deployment.GetName()+"-shc", shSpec) + Expect(err).To(Succeed(), "Unable to deploy SHC with App framework") + + // Ensure that the CM goes to Ready phase + testenv.ClusterMasterReady(deployment, testenvInstance) + + // Ensure Indexers go to Ready phase + testenv.SingleSiteIndexersReady(deployment, testenvInstance) + + // Ensure SHC go to Ready phase + testenv.SearchHeadClusterReady(deployment, testenvInstance) + + // Verify RF SF is met + testenv.VerifyRFSFMet(deployment, testenvInstance) + + // Verify Apps are downloaded by init-container + initContDownloadLocation := "/init-apps/" + appSourceName + deployerPod := []string{fmt.Sprintf(testenv.DeployerPod, deployment.GetName())} + testenv.VerifyAppsDownloadedByInitContainer(deployment, testenvInstance, testenvInstance.GetName(), deployerPod, appFileList, initContDownloadLocation) + + // Verify ES app is installed locally on SHC Deployer + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), deployerPod, esApp, false, "disabled", false, false) + + // Verify apps are installed on SHs + podNames := []string{} + for i := 0; i < int(shSpec.Replicas); i++ { + sh := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) + podNames = append(podNames, string(sh)) + } + testenv.VerifyAppInstalled(deployment, testenvInstance, testenvInstance.GetName(), podNames, appListV1, false, "enabled", false, true) + }) + }) +}) diff --git a/test/custom_resource_crud/custom_resource_crud_c3_test.go b/test/custom_resource_crud/custom_resource_crud_c3_test.go index 3c2d03bd5..174a5dba9 100644 --- a/test/custom_resource_crud/custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/custom_resource_crud_c3_test.go @@ -19,7 +19,7 @@ import ( . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" "github.com/splunk/splunk-operator/test/testenv" corev1 "k8s.io/api/core/v1" @@ -82,7 +82,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { } // Change CPU limits to trigger CR update - idxc := &enterprisev1.IndexerCluster{} + idxc := &enterpriseApi.IndexerCluster{} instanceName := fmt.Sprintf("%s-idxc", deployment.GetName()) err = deployment.GetInstance(instanceName, idxc) Expect(err).To(Succeed(), "Unable to get instance of indexer cluster") @@ -113,7 +113,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { } // Change CPU limits to trigger CR update - shc := &enterprisev1.SearchHeadCluster{} + shc := &enterpriseApi.SearchHeadCluster{} instanceName = fmt.Sprintf("%s-shc", deployment.GetName()) err = deployment.GetInstance(instanceName, shc) Expect(err).To(Succeed(), "Unable to fetch Search Head Cluster deployment") @@ -173,19 +173,19 @@ var _ = Describe("Crcrud test for SVA C3", func() { testenv.VerifyPVCsPerDeployment(deployment, testenvInstance, "cluster-master", 1, true, verificationTimeout) // Delete the Search Head Cluster - shc := &enterprisev1.SearchHeadCluster{} + shc := &enterpriseApi.SearchHeadCluster{} deployment.GetInstance(deployment.GetName()+"-shc", shc) err = deployment.DeleteCR(shc) Expect(err).To(Succeed(), "Unable to delete SHC instance", "SHC Name", shc) // Delete the Indexer Cluster - idxc := &enterprisev1.IndexerCluster{} + idxc := &enterpriseApi.IndexerCluster{} deployment.GetInstance(deployment.GetName()+"-idxc", idxc) err = deployment.DeleteCR(idxc) Expect(err).To(Succeed(), "Unable to delete IDXC instance", "IDXC Name", idxc) // Delete the Cluster Master - cm := &enterprisev1.ClusterMaster{} + cm := &enterpriseApi.ClusterMaster{} deployment.GetInstance(deployment.GetName(), cm) err = deployment.DeleteCR(cm) Expect(err).To(Succeed(), "Unable to delete CM instance", "CM Name", cm) diff --git a/test/custom_resource_crud/custom_resource_crud_m4_test.go b/test/custom_resource_crud/custom_resource_crud_m4_test.go index 6c9eea266..79435b74f 100644 --- a/test/custom_resource_crud/custom_resource_crud_m4_test.go +++ b/test/custom_resource_crud/custom_resource_crud_m4_test.go @@ -18,7 +18,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" "github.com/splunk/splunk-operator/test/testenv" corev1 "k8s.io/api/core/v1" @@ -82,7 +82,7 @@ var _ = Describe("Crcrud test for SVA M4", func() { } // Change CPU limits to trigger CR update - idxc := &enterprisev1.IndexerCluster{} + idxc := &enterpriseApi.IndexerCluster{} for i := 1; i <= siteCount; i++ { siteName := fmt.Sprintf("site%d", i) instanceName := fmt.Sprintf("%s-%s", deployment.GetName(), siteName) diff --git a/test/deploy-cluster.sh b/test/deploy-cluster.sh index f44589062..f52303f92 100755 --- a/test/deploy-cluster.sh +++ b/test/deploy-cluster.sh @@ -11,6 +11,11 @@ if [[ -z "${TEST_CLUSTER_PLATFORM}" ]]; then export TEST_CLUSTER_PLATFORM="${CLUSTER_PROVIDER}" fi +if [[ -z "${TEST_CLUSTER_NAME}" ]]; then + echo "Test Cluster Name Not Set in Environment Variables. 
Changing to env.sh value" + export TEST_CLUSTER_NAME="${CLUSTER_NAME}" +fi + if [[ -z "${CLUSTER_NODES}" ]]; then echo "Test Cluster Nodes Not Set in Environment Variables. Changing to env.sh value" export CLUSTER_NODES="${NUM_NODES}" diff --git a/test/deploy-eks-cluster.sh b/test/deploy-eks-cluster.sh index 34a148d5f..37820af08 100755 --- a/test/deploy-eks-cluster.sh +++ b/test/deploy-eks-cluster.sh @@ -16,9 +16,9 @@ if [[ -z "${ECR_REPOSITORY}" ]]; then fi function deleteCluster() { - eksctl delete cluster --name=${CLUSTER_NAME} + eksctl delete cluster --name=${TEST_CLUSTER_NAME} if [ $? -ne 0 ]; then - echo "Unable to delete cluster - ${CLUSTER_NAME}" + echo "Unable to delete cluster - ${TEST_CLUSTER_NAME}" return 1 fi @@ -33,17 +33,17 @@ function createCluster() { return 1 fi - found=$(eksctl get cluster --name "${CLUSTER_NAME}" -v 0) + found=$(eksctl get cluster --name "${TEST_CLUSTER_NAME}" -v 0) if [ -z "${found}" ]; then - eksctl create cluster --name=${CLUSTER_NAME} --nodes=${CLUSTER_WORKERS} --vpc-public-subnets=${EKS_VPC_PUBLIC_SUBNET_STRING} --vpc-private-subnets=${EKS_VPC_PRIVATE_SUBNET_STRING} + eksctl create cluster --name=${TEST_CLUSTER_NAME} --nodes=${CLUSTER_WORKERS} --vpc-public-subnets=${EKS_VPC_PUBLIC_SUBNET_STRING} --vpc-private-subnets=${EKS_VPC_PRIVATE_SUBNET_STRING} if [ $? -ne 0 ]; then - echo "Unable to create cluster - ${CLUSTER_NAME}" + echo "Unable to create cluster - ${TEST_CLUSTER_NAME}" return 1 fi else - echo "Retrieving kubeconfig for ${CLUSTER_NAME}" + echo "Retrieving kubeconfig for ${TEST_CLUSTER_NAME}" # Cluster exists but kubeconfig may not - eksctl utils write-kubeconfig --cluster=${CLUSTER_NAME} + eksctl utils write-kubeconfig --cluster=${TEST_CLUSTER_NAME} fi echo "Logging in to ECR" @@ -57,5 +57,5 @@ function createCluster() { # Login to ECR registry so images can be push and pull from later whe # Output echo "EKS cluster nodes:" - eksctl get cluster --name=${CLUSTER_NAME} + eksctl get cluster --name=${TEST_CLUSTER_NAME} } diff --git a/test/deploy-kind-cluster.sh b/test/deploy-kind-cluster.sh index fb67dda77..d63598355 100755 --- a/test/deploy-kind-cluster.sh +++ b/test/deploy-kind-cluster.sh @@ -4,9 +4,9 @@ reg_name='kind-registry' reg_port=$(echo $PRIVATE_REGISTRY | cut -d':' -f2) function deleteCluster() { - kind delete cluster --name=${CLUSTER_NAME} + kind delete cluster --name=${TEST_CLUSTER_NAME} if [ $? 
-ne 0 ]; then - echo "Unable to delete cluster - ${CLUSTER_NAME}" + echo "Unable to delete cluster - ${TEST_CLUSTER_NAME}" return 1 fi @@ -27,7 +27,7 @@ function createCluster() { return 1 fi - found=$(kind get clusters | grep "^${CLUSTER_NAME}$") + found=$(kind get clusters | grep "^${TEST_CLUSTER_NAME}$") if [ -z "$found" ]; then # create registry container unless it already exists running="$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" @@ -44,7 +44,7 @@ function createCluster() { done # create a cluster with the local registry enabled in containerd -cat < 0 { + if strings.Contains(strings.Split(stdout, "\n")[0], "Application is disabled") { + return "DISABLED", nil + } + return "ENABLED", nil + } + return "", err +} + +// GetAppFileList Get the Versioned App file list for app Names +func GetAppFileList(appList []string, version int) []string { + fileKey := fmt.Sprintf("V%dfilename", version) + appFileList := make([]string, 0, len(appList)) + for _, app := range appList { + appFileList = append(appFileList, AppInfo[app][fileKey]) + } + return appFileList +} diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 322a9664b..e84441292 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/client/config" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" ) @@ -91,13 +91,13 @@ func (d *Deployment) Teardown() error { } // DeployStandalone deploys a standalone splunk enterprise instance on the specified testenv -func (d *Deployment) DeployStandalone(name string) (*enterprisev1.Standalone, error) { +func (d *Deployment) DeployStandalone(name string) (*enterpriseApi.Standalone, error) { standalone := newStandalone(name, d.testenv.namespace) deployed, err := d.deployCR(name, standalone) if err != nil { return nil, err } - return deployed.(*enterprisev1.Standalone), err + return deployed.(*enterpriseApi.Standalone), err } // GetInstance retrieves the standalone, indexer, searchhead, licensemaster instance @@ -158,10 +158,10 @@ func (d *Deployment) PodExecCommand(podName string, cmd []string, stdin string, } //DeployLicenseMaster deploys the license master instance -func (d *Deployment) DeployLicenseMaster(name string) (*enterprisev1.LicenseMaster, error) { +func (d *Deployment) DeployLicenseMaster(name string) (*enterpriseApi.LicenseMaster, error) { if d.testenv.licenseFilePath == "" { - return nil, fmt.Errorf("No license file path specified") + return nil, fmt.Errorf("no license file path specified") } lm := newLicenseMaster(name, d.testenv.namespace, d.testenv.licenseCMName) @@ -169,48 +169,48 @@ func (d *Deployment) DeployLicenseMaster(name string) (*enterprisev1.LicenseMast if err != nil { return nil, err } - return deployed.(*enterprisev1.LicenseMaster), err + return deployed.(*enterpriseApi.LicenseMaster), err } //DeployClusterMaster deploys the cluster master -func (d *Deployment) DeployClusterMaster(name, licenseMasterName string, ansibleConfig string) (*enterprisev1.ClusterMaster, error) { +func (d *Deployment) DeployClusterMaster(name, licenseMasterName string, ansibleConfig string) (*enterpriseApi.ClusterMaster, error) { d.testenv.Log.Info("Deploying cluster-master", "name", name) cm := newClusterMaster(name, d.testenv.namespace, licenseMasterName, 
ansibleConfig) deployed, err := d.deployCR(name, cm) if err != nil { return nil, err } - return deployed.(*enterprisev1.ClusterMaster), err + return deployed.(*enterpriseApi.ClusterMaster), err } //DeployClusterMasterWithSmartStoreIndexes deploys the cluster master with smartstore indexes -func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(name, licenseMasterName string, ansibleConfig string, smartstorespec enterprisev1.SmartStoreSpec) (*enterprisev1.ClusterMaster, error) { +func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(name, licenseMasterName string, ansibleConfig string, smartstorespec enterpriseApi.SmartStoreSpec) (*enterpriseApi.ClusterMaster, error) { d.testenv.Log.Info("Deploying cluster-master", "name", name) cm := newClusterMasterWithGivenIndexes(name, d.testenv.namespace, licenseMasterName, ansibleConfig, smartstorespec) deployed, err := d.deployCR(name, cm) if err != nil { return nil, err } - return deployed.(*enterprisev1.ClusterMaster), err + return deployed.(*enterpriseApi.ClusterMaster), err } //DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(name, licenseMasterName string, count int, clusterMasterRef string, ansibleConfig string) (*enterprisev1.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(name, licenseMasterName string, count int, clusterMasterRef string, ansibleConfig string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name) indexer := newIndexerCluster(name, d.testenv.namespace, licenseMasterName, count, clusterMasterRef, ansibleConfig) deployed, err := d.deployCR(name, indexer) if err != nil { return nil, err } - return deployed.(*enterprisev1.IndexerCluster), err + return deployed.(*enterpriseApi.IndexerCluster), err } // DeploySearchHeadCluster deploys a search head cluster -func (d *Deployment) DeploySearchHeadCluster(name, clusterMasterRef, licenseMasterName string, ansibleConfig string) (*enterprisev1.SearchHeadCluster, error) { +func (d *Deployment) DeploySearchHeadCluster(name, clusterMasterRef, licenseMasterName string, ansibleConfig string) (*enterpriseApi.SearchHeadCluster, error) { d.testenv.Log.Info("Deploying search head cluster", "name", name) indexer := newSearchHeadCluster(name, d.testenv.namespace, clusterMasterRef, licenseMasterName, ansibleConfig) deployed, err := d.deployCR(name, indexer) - return deployed.(*enterprisev1.SearchHeadCluster), err + return deployed.(*enterpriseApi.SearchHeadCluster), err } func (d *Deployment) deployCR(name string, cr runtime.Object) (runtime.Object, error) { @@ -426,7 +426,7 @@ func (d *Deployment) DeployMultisiteCluster(name string, indexerReplicas int, si } // DeployStandaloneWithLM deploys a standalone splunk enterprise instance with license master on the specified testenv -func (d *Deployment) DeployStandaloneWithLM(name string) (*enterprisev1.Standalone, error) { +func (d *Deployment) DeployStandaloneWithLM(name string) (*enterpriseApi.Standalone, error) { var licenseMaster string // If license file specified, deploy License Master @@ -444,24 +444,24 @@ func (d *Deployment) DeployStandaloneWithLM(name string) (*enterprisev1.Standalo if err != nil { return nil, err } - return deployed.(*enterprisev1.Standalone), err + return deployed.(*enterpriseApi.Standalone), err } // DeployStandalonewithGivenSpec deploys a standalone with given spec -func (d *Deployment) DeployStandalonewithGivenSpec(name string, spec enterprisev1.StandaloneSpec) (*enterprisev1.Standalone, error) { 
+func (d *Deployment) DeployStandalonewithGivenSpec(name string, spec enterpriseApi.StandaloneSpec) (*enterpriseApi.Standalone, error) { standalone := newStandaloneWithGivenSpec(name, d.testenv.namespace, spec) deployed, err := d.deployCR(name, standalone) if err != nil { return nil, err } - return deployed.(*enterprisev1.Standalone), err + return deployed.(*enterpriseApi.Standalone), err } // DeployStandaloneWithGivenSmartStoreSpec deploys a standalone give smartstore spec -func (d *Deployment) DeployStandaloneWithGivenSmartStoreSpec(name string, smartStoreSpec enterprisev1.SmartStoreSpec) (*enterprisev1.Standalone, error) { +func (d *Deployment) DeployStandaloneWithGivenSmartStoreSpec(name string, smartStoreSpec enterpriseApi.SmartStoreSpec) (*enterpriseApi.Standalone, error) { - spec := enterprisev1.StandaloneSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + spec := enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", }, @@ -475,11 +475,11 @@ func (d *Deployment) DeployStandaloneWithGivenSmartStoreSpec(name string, smartS if err != nil { return nil, err } - return deployed.(*enterprisev1.Standalone), err + return deployed.(*enterpriseApi.Standalone), err } // DeployMultisiteClusterWithSearchHeadAndIndexes deploys a lm, cluster-master, indexers in multiple sites and SH clusters -func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(name string, indexerReplicas int, siteCount int, indexesSecret string, smartStoreSpec enterprisev1.SmartStoreSpec) error { +func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(name string, indexerReplicas int, siteCount int, indexesSecret string, smartStoreSpec enterpriseApi.SmartStoreSpec) error { var licenseMaster string @@ -532,3 +532,201 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(name string, _, err = d.DeploySearchHeadCluster(name+"-shc", name, licenseMaster, siteDefaults) return err } + +// DeployClusterMasterWithGivenSpec deploys the cluster master with given SPEC +func (d *Deployment) DeployClusterMasterWithGivenSpec(name string, spec enterpriseApi.ClusterMasterSpec) (*enterpriseApi.ClusterMaster, error) { + d.testenv.Log.Info("Deploying cluster-master", "name", name) + cm := newClusterMasterWithGivenSpec(name, d.testenv.namespace, spec) + deployed, err := d.deployCR(name, cm) + if err != nil { + return nil, err + } + return deployed.(*enterpriseApi.ClusterMaster), err +} + +// DeploySearchHeadClusterWithGivenSpec deploys a search head cluster +func (d *Deployment) DeploySearchHeadClusterWithGivenSpec(name string, spec enterpriseApi.SearchHeadClusterSpec) (*enterpriseApi.SearchHeadCluster, error) { + d.testenv.Log.Info("Deploying search head cluster", "name", name) + indexer := newSearchHeadClusterWithGivenSpec(name, d.testenv.namespace, spec) + deployed, err := d.deployCR(name, indexer) + return deployed.(*enterpriseApi.SearchHeadCluster), err +} + +// DeployLicenseMasterWithGivenSpec deploys the license master with given SPEC +func (d *Deployment) DeployLicenseMasterWithGivenSpec(name string, spec enterpriseApi.LicenseMasterSpec) (*enterpriseApi.LicenseMaster, error) { + d.testenv.Log.Info("Deploying license-master", "name", name) + lm := newLicenseMasterWithGivenSpec(name, d.testenv.namespace, spec) + deployed, err := d.deployCR(name, lm) + if err != nil { + return nil, err + } + return deployed.(*enterpriseApi.LicenseMaster), err +} + +// DeploySingleSiteClusterWithGivenAppFrameworkSpec deploys 
indexer cluster (lm, shc optional) with app framework spec +func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(name string, indexerReplicas int, shc bool, appFrameworkSpec enterpriseApi.AppFrameworkSpec, delaySeconds int) error { + + licenseMaster := "" + + // If license file specified, deploy License Master + if d.testenv.licenseFilePath != "" { + // Deploy the license master + _, err := d.DeployLicenseMaster(name) + if err != nil { + return err + } + + licenseMaster = name + } + + // Deploy the cluster master + cmSpec := enterpriseApi.ClusterMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + LicenseMasterRef: corev1.ObjectReference{ + Name: licenseMaster, + }, + LivenessInitialDelaySeconds: int32(delaySeconds), + ReadinessInitialDelaySeconds: int32(delaySeconds), + }, + AppFrameworkConfig: appFrameworkSpec, + } + _, err := d.DeployClusterMasterWithGivenSpec(name, cmSpec) + if err != nil { + return err + } + + // Deploy the indexer cluster + _, err = d.DeployIndexerCluster(name+"-idxc", licenseMaster, indexerReplicas, name, "") + if err != nil { + return err + } + + shSpec := enterpriseApi.SearchHeadClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterMasterRef: corev1.ObjectReference{ + Name: name, + }, + LicenseMasterRef: corev1.ObjectReference{ + Name: licenseMaster, + }, + LivenessInitialDelaySeconds: int32(delaySeconds), + ReadinessInitialDelaySeconds: int32(delaySeconds), + }, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, + } + if shc { + _, err = d.DeploySearchHeadClusterWithGivenSpec(name+"-shc", shSpec) + if err != nil { + return err + } + } + + return nil +} + +// DeployMultisiteClusterWithSearchHeadAndAppFramework deploys cluster-master, indexers in multiple sites (SHC LM Optional) with app framework spec +func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(name string, indexerReplicas int, siteCount int, appFrameworkSpec enterpriseApi.AppFrameworkSpec, shc bool, delaySeconds int) error { + + licenseMaster := "" + + // If license file specified, deploy License Master + if d.testenv.licenseFilePath != "" { + // Deploy the license master + _, err := d.DeployLicenseMaster(name) + if err != nil { + return err + } + + licenseMaster = name + } + + // Deploy the cluster-master + defaults := `splunk: + multisite_master: localhost + all_sites: site1,site2,site3 + site: site1 + multisite_replication_factor_origin: 1 + multisite_replication_factor_total: 2 + multisite_search_factor_origin: 1 + multisite_search_factor_total: 2 + idxc: + search_factor: 2 + replication_factor: 2 +` + + // Cluster Master Spec + cmSpec := enterpriseApi.ClusterMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + LicenseMasterRef: corev1.ObjectReference{ + Name: licenseMaster, + }, + Defaults: defaults, + LivenessInitialDelaySeconds: int32(delaySeconds), + ReadinessInitialDelaySeconds: int32(delaySeconds), + }, + AppFrameworkConfig: appFrameworkSpec, + } + + _, err := d.DeployClusterMasterWithGivenSpec(name, cmSpec) + if err != nil { + return err + } + + // Deploy indexer sites + for site := 1; site <= siteCount; site++ { + siteName := fmt.Sprintf("site%d", site) + siteDefaults := fmt.Sprintf(`splunk: + multisite_master: splunk-%s-cluster-master-service + 
site: %s +`, name, siteName) + _, err := d.DeployIndexerCluster(name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults) + if err != nil { + return err + } + } + + siteDefaults := fmt.Sprintf(`splunk: + multisite_master: splunk-%s-cluster-master-service + site: site0 +`, name) + // Deploy the SH cluster + shSpec := enterpriseApi.SearchHeadClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "Always", + }, + Volumes: []corev1.Volume{}, + ClusterMasterRef: corev1.ObjectReference{ + Name: name, + }, + LicenseMasterRef: corev1.ObjectReference{ + Name: licenseMaster, + }, + Defaults: siteDefaults, + LivenessInitialDelaySeconds: int32(delaySeconds), + ReadinessInitialDelaySeconds: int32(delaySeconds), + }, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, + } + if shc { + _, err = d.DeploySearchHeadClusterWithGivenSpec(name+"-shc", shSpec) + if err != nil { + return err + } + } + return nil +} diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go index ec4d2656c..3e32022d5 100644 --- a/test/testenv/remote_index_utils.go +++ b/test/testenv/remote_index_utils.go @@ -3,7 +3,7 @@ package testenv import ( "encoding/json" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" logf "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -85,21 +85,23 @@ func RollHotToWarm(deployment *Deployment, podName string, indexName string) boo } // GenerateIndexVolumeSpec return VolumeSpec struct with given values -func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string) enterprisev1.VolumeSpec { - return enterprisev1.VolumeSpec{ +func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string, provider string, storageType string) enterpriseApi.VolumeSpec { + return enterpriseApi.VolumeSpec{ Name: volumeName, Endpoint: endpoint, Path: testIndexesS3Bucket, SecretRef: secretRef, + Provider: provider, + Type: storageType, } } // GenerateIndexSpec return VolumeSpec struct with given values -func GenerateIndexSpec(indexName string, volName string) enterprisev1.IndexSpec { - return enterprisev1.IndexSpec{ +func GenerateIndexSpec(indexName string, volName string) enterpriseApi.IndexSpec { + return enterpriseApi.IndexSpec{ Name: indexName, RemotePath: indexName, - IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{ + IndexAndGlobalCommonSpec: enterpriseApi.IndexAndGlobalCommonSpec{ VolName: volName, }, } diff --git a/test/testenv/s3utils.go b/test/testenv/s3utils.go index f051db318..d88a2c3f4 100644 --- a/test/testenv/s3utils.go +++ b/test/testenv/s3utils.go @@ -1,7 +1,9 @@ package testenv import ( + "errors" "os" + "path/filepath" "strings" "github.com/aws/aws-sdk-go/aws" @@ -38,53 +40,189 @@ func GetS3Endpoint() string { // CheckPrefixExistsOnS3 lists object in a bucket func CheckPrefixExistsOnS3(prefix string) bool { dataBucket := testIndexesS3Bucket - sess, err := session.NewSession(&aws.Config{Region: aws.String(s3Region)}) - if err != nil { - logf.Log.Error(err, "Failed to create s3 session") - } - svc := s3.New(session.Must(sess, err)) - resp, err := svc.ListObjects(&s3.ListObjectsInput{ - Bucket: aws.String(dataBucket), - Prefix: aws.String(prefix), - }) - - if err != nil { - logf.Log.Error(err, "Failed to list objects on s3 bucket") - return false - } - for _, key := range resp.Contents { + resp := GetFileListOnS3(dataBucket, prefix) + for _, key := range resp { 
logf.Log.Info("CHECKING KEY ", "KEY", *key.Key) if strings.Contains(*key.Key, prefix) { logf.Log.Info("Prefix found on bucket", "Prefix", prefix, "KEY", *key.Key) return true } } - return false } -// DownloadFromS3Bucket downloads license file from S3 -func DownloadFromS3Bucket() (string, error) { - dataBucket := testS3Bucket +// DownloadLicenseFromS3Bucket downloads license file from S3 +func DownloadLicenseFromS3Bucket() (string, error) { location := enterpriseLicenseLocation item := "enterprise.lic" - file, err := os.Create(item) + dataBucket := testS3Bucket + filename, err := DownloadFileFromS3(dataBucket, item, location, ".") + return filename, err +} + +// S3Session Create session object for S3 bucket connection +func S3Session() (*session.Session, error) { + sess, err := session.NewSession(&aws.Config{Region: aws.String(s3Region)}) + if err != nil { + logf.Log.Error(err, "Failed to create session to S3") + } + return sess, err +} + +// DownloadFileFromS3 downloads file from S3 +func DownloadFileFromS3(dataBucket string, filename string, s3FilePath string, downloadDir string) (string, error) { + // Check Directory to download files exists + if _, err := os.Stat(downloadDir); errors.Is(err, os.ErrNotExist) { + err := os.Mkdir(downloadDir, os.ModePerm) + if err != nil { + logf.Log.Error(err, "Unable to create directory to download apps") + return "", err + } + } + + // Create empty file on OS File System + file, err := os.Create(filepath.Join(downloadDir, filename)) if err != nil { - logf.Log.Error(err, "Failed to create license file") + logf.Log.Error(err, "Failed to create file", "Filename", file) } defer file.Close() - sess, _ := session.NewSession(&aws.Config{Region: aws.String(s3Region)}) + + sess, err := S3Session() + if err != nil { + return "", err + } + downloader := s3manager.NewDownloader(sess) numBytes, err := downloader.Download(file, &s3.GetObjectInput{ Bucket: aws.String(dataBucket), - Key: aws.String(location + "/" + "enterprise.lic"), + Key: aws.String(s3FilePath + "/" + filename), }) + if err != nil { - logf.Log.Error(err, "Failed to download license file") + logf.Log.Error(err, "Failed to download file", "Filename", filename) + return "", err } logf.Log.Info("Downloaded", "filename", file.Name(), "bytes", numBytes) return file.Name(), err } + +// UploadFileToS3 upload file to S3 +func UploadFileToS3(dataBucket string, filename string, path string, file *os.File) (string, error) { + sess, err := S3Session() + if err == nil { + uploader := s3manager.NewUploader(sess) + numBytes, err := uploader.Upload(&s3manager.UploadInput{ + Bucket: aws.String(dataBucket), + Key: aws.String(filepath.Join(path, filename)), // Name of the file to be saved + Body: file, + }) + if err != nil { + logf.Log.Error(err, "Error in file upload") + } + logf.Log.Info("Uploaded", "filename", file.Name(), "bytes", numBytes) + } + return file.Name(), err +} + +// GetFileListOnS3 lists object in a bucket +func GetFileListOnS3(dataBucket string, path string) []*s3.Object { + sess, err := S3Session() + svc := s3.New(session.Must(sess, err)) + resp, err := svc.ListObjects(&s3.ListObjectsInput{ + Bucket: aws.String(dataBucket), + Prefix: aws.String(path), + }) + if err != nil { + logf.Log.Error(err, "Failed to list objects on s3 bucket") + return nil + } + + return resp.Contents +} + +// DeleteFilesOnS3 Delete a list of file on S3 Bucket +func DeleteFilesOnS3(bucket string, filenames []string) error { + for _, file := range filenames { + err := DeleteFileOnS3(bucket, file) + if err != nil { + return err 
+		}
+	}
+	return nil
+}
+
+// DeleteFileOnS3 deletes a given file on the S3 bucket
+func DeleteFileOnS3(bucket string, filename string) error {
+	sess, err := S3Session()
+	if err != nil {
+		return err
+	}
+	svc := s3.New(sess)
+	_, err = svc.DeleteObject(&s3.DeleteObjectInput{Bucket: aws.String(bucket), Key: aws.String(filename)})
+	if err != nil {
+		logf.Log.Error(err, "Unable to delete object from bucket", "Object Name", filename, "Bucket Name", bucket)
+		return err
+	}
+
+	err = svc.WaitUntilObjectNotExists(&s3.HeadObjectInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(filename),
+	})
+	logf.Log.Info("Deleted file on S3", "File Name", filename, "Bucket", bucket)
+	return err
+}
+
+// GetFilesInPathOnS3 returns the list of file names under a given path on S3
+func GetFilesInPathOnS3(bucket string, path string) []string {
+	resp := GetFileListOnS3(bucket, path)
+	var files []string
+	for _, key := range resp {
+		logf.Log.Info("CHECKING KEY ", "KEY", *key.Key)
+		if strings.Contains(*key.Key, path) {
+			filename := strings.Replace(*key.Key, path, "", -1)
+			// This condition filters out directories, as GetFileListOnS3 returns files and directories with their absolute paths
+			if len(filename) > 1 {
+				logf.Log.Info("File found on bucket", "Prefix", path, "KEY", *key.Key)
+				files = append(files, filename)
+			}
+		}
+	}
+	return files
+}
+
+// DownloadFilesFromS3 downloads a given list of files from S3 to the given directory
+func DownloadFilesFromS3(testDataS3Bucket string, s3AppDir string, downloadDir string, appList []string) error {
+	for _, key := range appList {
+		logf.Log.Info("Downloading app from S3", "App Name", key)
+		_, err := DownloadFileFromS3(testDataS3Bucket, key, s3AppDir, downloadDir)
+		if err != nil {
+			logf.Log.Error(err, "Unable to download file", "File Name", key)
+			return err
+		}
+	}
+	return nil
+}
+
+// UploadFilesToS3 uploads a given list of files to the given location on an S3 bucket
+func UploadFilesToS3(testS3Bucket string, s3TestDir string, applist []string, downloadDir string) ([]string, error) {
+	var uploadedFiles []string
+	for _, key := range applist {
+		logf.Log.Info("Uploading app to s3", "App Name", key)
+		fileLocation := filepath.Join(downloadDir, key)
+		fileBody, err := os.Open(fileLocation)
+		if err != nil {
+			logf.Log.Error(err, "Unable to open app file", "App Name", key)
+			return nil, err
+		}
+		fileName, err := UploadFileToS3(testS3Bucket, key, s3TestDir, fileBody)
+		if err != nil {
+			logf.Log.Error(err, "Unable to upload file", "File Name", key)
+			return nil, err
+		}
+		logf.Log.Info("App uploaded to test S3", "App Name", fileName)
+		uploadedFiles = append(uploadedFiles, fileName)
+	}
+	return uploadedFiles, nil
+}
diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go
index cb828a19d..99def9a63 100644
--- a/test/testenv/testenv.go
+++ b/test/testenv/testenv.go
@@ -37,7 +37,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/controller-runtime/pkg/manager/signals"
 
-	enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1"
+	enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2"
 )
 
 const (
@@ -62,6 +62,9 @@ const (
 	// SearchHeadPod Template String for search head pod
 	SearchHeadPod = "splunk-%s-shc-search-head-%d"
 
+	// DeployerPod Template String for deployer pod
+	DeployerPod = "splunk-%s-shc-deployer-0"
+
 	// StandalonePod Template String for standalone pod
 	StandalonePod = "splunk-%s-standalone-%d"
 
@@ -189,7 +192,7 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, licenseFilePath st
 	testenv.Log =
logf.Log.WithValues("testenv", testenv.name) // Scheme - enterprisev1.SchemeBuilder.AddToScheme(scheme.Scheme) + enterpriseApi.SchemeBuilder.AddToScheme(scheme.Scheme) // Get a config to talk to the apiserver cfg, err := config.GetConfig() @@ -610,3 +613,8 @@ func (testenv *TestEnv) NewDeployment(name string) (*Deployment, error) { return &d, nil } + +// GetLMConfigMap Return name of license config map +func (testenv *TestEnv) GetLMConfigMap() string { + return testenv.licenseCMName +} diff --git a/test/testenv/util.go b/test/testenv/util.go index 555627f5c..96d92eadc 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -20,6 +20,8 @@ import ( "math/rand" "os/exec" "path" + "reflect" + "sort" "strings" "time" @@ -31,7 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" logf "sigs.k8s.io/controller-runtime/pkg/log" - enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1" + enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2" splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" ) @@ -61,9 +63,9 @@ func RandomDNSName(n int) string { } // newStandalone creates and initializes CR for Standalone Kind -func newStandalone(name, ns string) *enterprisev1.Standalone { +func newStandalone(name, ns string) *enterpriseApi.Standalone { - new := enterprisev1.Standalone{ + new := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -73,8 +75,8 @@ func newStandalone(name, ns string) *enterprisev1.Standalone { Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.StandaloneSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", }, @@ -87,9 +89,9 @@ func newStandalone(name, ns string) *enterprisev1.Standalone { } // newStandalone creates and initializes CR for Standalone Kind -func newStandaloneWithGivenSpec(name, ns string, spec enterprisev1.StandaloneSpec) *enterprisev1.Standalone { +func newStandaloneWithGivenSpec(name, ns string, spec enterpriseApi.StandaloneSpec) *enterpriseApi.Standalone { - new := enterprisev1.Standalone{ + new := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -104,8 +106,8 @@ func newStandaloneWithGivenSpec(name, ns string, spec enterprisev1.StandaloneSpe return &new } -func newLicenseMaster(name, ns, licenseConfigMapName string) *enterprisev1.LicenseMaster { - new := enterprisev1.LicenseMaster{ +func newLicenseMaster(name, ns, licenseConfigMapName string) *enterpriseApi.LicenseMaster { + new := enterpriseApi.LicenseMaster{ TypeMeta: metav1.TypeMeta{ Kind: "LicenseMaster", }, @@ -115,8 +117,8 @@ func newLicenseMaster(name, ns, licenseConfigMapName string) *enterprisev1.Licen Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.LicenseMasterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.LicenseMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Volumes: []corev1.Volume{ { Name: "licenses", @@ -142,8 +144,8 @@ func newLicenseMaster(name, ns, licenseConfigMapName string) *enterprisev1.Licen } // newClusterMaster creates and initialize the CR for ClusterMaster Kind -func newClusterMaster(name, ns, licenseMasterName string, ansibleConfig string) *enterprisev1.ClusterMaster { - new := enterprisev1.ClusterMaster{ +func newClusterMaster(name, ns, licenseMasterName string, ansibleConfig string) *enterpriseApi.ClusterMaster { 
+ new := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -153,8 +155,8 @@ func newClusterMaster(name, ns, licenseMasterName string, ansibleConfig string) Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.ClusterMasterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Volumes: []corev1.Volume{}, Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", @@ -171,8 +173,8 @@ func newClusterMaster(name, ns, licenseMasterName string, ansibleConfig string) } // newClusterMaster creates and initialize the CR for ClusterMaster Kind -func newClusterMasterWithGivenIndexes(name, ns, licenseMasterName string, ansibleConfig string, smartstorespec enterprisev1.SmartStoreSpec) *enterprisev1.ClusterMaster { - new := enterprisev1.ClusterMaster{ +func newClusterMasterWithGivenIndexes(name, ns, licenseMasterName string, ansibleConfig string, smartstorespec enterpriseApi.SmartStoreSpec) *enterpriseApi.ClusterMaster { + new := enterpriseApi.ClusterMaster{ TypeMeta: metav1.TypeMeta{ Kind: "ClusterMaster", }, @@ -182,9 +184,9 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseMasterName string, ansibl Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.ClusterMasterSpec{ + Spec: enterpriseApi.ClusterMasterSpec{ SmartStore: smartstorespec, - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Volumes: []corev1.Volume{}, Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", @@ -201,8 +203,8 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseMasterName string, ansibl } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseMasterName string, replicas int, clusterMasterRef string, ansibleConfig string) *enterprisev1.IndexerCluster { - new := enterprisev1.IndexerCluster{ +func newIndexerCluster(name, ns, licenseMasterName string, replicas int, clusterMasterRef string, ansibleConfig string) *enterpriseApi.IndexerCluster { + new := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -212,8 +214,8 @@ func newIndexerCluster(name, ns, licenseMasterName string, replicas int, cluster Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.IndexerClusterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.IndexerClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Volumes: []corev1.Volume{}, Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", @@ -230,8 +232,8 @@ func newIndexerCluster(name, ns, licenseMasterName string, replicas int, cluster return &new } -func newSearchHeadCluster(name, ns, clusterMasterRef, licenseMasterName string, ansibleConfig string) *enterprisev1.SearchHeadCluster { - new := enterprisev1.SearchHeadCluster{ +func newSearchHeadCluster(name, ns, clusterMasterRef, licenseMasterName string, ansibleConfig string) *enterpriseApi.SearchHeadCluster { + new := enterpriseApi.SearchHeadCluster{ TypeMeta: metav1.TypeMeta{ Kind: "SearchHeadCluster", }, @@ -241,8 +243,8 @@ func newSearchHeadCluster(name, ns, clusterMasterRef, licenseMasterName string, Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.SearchHeadClusterSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.SearchHeadClusterSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ 
Volumes: []corev1.Volume{}, Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", @@ -401,9 +403,9 @@ func newOperator(name, ns, account, operatorImageAndTag, splunkEnterpriseImageAn } // newStandaloneWithLM creates and initializes CR for Standalone Kind with License Master -func newStandaloneWithLM(name, ns string, licenseMasterName string) *enterprisev1.Standalone { +func newStandaloneWithLM(name, ns string, licenseMasterName string) *enterpriseApi.Standalone { - new := enterprisev1.Standalone{ + new := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -413,8 +415,8 @@ func newStandaloneWithLM(name, ns string, licenseMasterName string) *enterprisev Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, - Spec: enterprisev1.StandaloneSpec{ - CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: enterpriseApi.StandaloneSpec{ + CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: splcommon.Spec{ ImagePullPolicy: "IfNotPresent", }, @@ -447,9 +449,9 @@ func newSecretSpec(ns string, secretName string, data map[string][]byte) *corev1 } // newStandaloneWithSpec creates and initializes CR for Standalone Kind with given spec -func newStandaloneWithSpec(name, ns string, spec enterprisev1.StandaloneSpec) *enterprisev1.Standalone { +func newStandaloneWithSpec(name, ns string, spec enterpriseApi.StandaloneSpec) *enterpriseApi.Standalone { - new := enterprisev1.Standalone{ + new := enterpriseApi.Standalone{ TypeMeta: metav1.TypeMeta{ Kind: "Standalone", }, @@ -523,7 +525,7 @@ func GetConfLineFromPod(podName string, filePath string, ns string, configName s continue } // Look for given config name in file - if stanzaFound == false { + if !stanzaFound { if strings.HasPrefix(line, stanzaString) { stanzaFound = true } @@ -535,7 +537,7 @@ func GetConfLineFromPod(podName string, filePath string, ns string, configName s } } if config == "" { - err = fmt.Errorf("Failed to find config %s under stanza %s", configName, stanza) + err = fmt.Errorf("failed to find config %s under stanza %s", configName, stanza) } return config, err } @@ -551,3 +553,93 @@ func ExecuteCommandOnPod(deployment *Deployment, podName string, stdin string) ( logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr) return stdout, nil } + +// newClusterMasterWithGivenSpec creates and initialize the CR for ClusterMaster Kind +func newClusterMasterWithGivenSpec(name string, ns string, spec enterpriseApi.ClusterMasterSpec) *enterpriseApi.ClusterMaster { + new := enterpriseApi.ClusterMaster{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterMaster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, + }, + Spec: spec, + } + return &new +} + +// newSearchHeadClusterWithGivenSpec create and initializes CR for Search Cluster Kind with Given Spec +func newSearchHeadClusterWithGivenSpec(name string, ns string, spec enterpriseApi.SearchHeadClusterSpec) *enterpriseApi.SearchHeadCluster { + new := enterpriseApi.SearchHeadCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "SearchHeadCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, + }, + Spec: spec, + } + return &new +} + +// newLicenseMasterWithGivenSpec create and initializes CR for License Master Kind with Given Spec +func newLicenseMasterWithGivenSpec(name, ns string, spec enterpriseApi.LicenseMasterSpec) *enterpriseApi.LicenseMaster { 
+	new := enterpriseApi.LicenseMaster{
+		TypeMeta: metav1.TypeMeta{
+			Kind: "LicenseMaster",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       name,
+			Namespace:  ns,
+			Finalizers: []string{"enterprise.splunk.com/delete-pvc"},
+		},
+
+		Spec: spec,
+	}
+
+	return &new
+}
+
+// GetDirsOrFilesInPath returns the subdirectories or files under the given path on the given pod.
+// Note: directories are returned with a trailing "/", which callers rely on to match folder names.
+func GetDirsOrFilesInPath(deployment *Deployment, podName string, path string, dirOnly bool) ([]string, error) {
+	var cmd string
+	if dirOnly {
+		cmd = fmt.Sprintf("cd %s; ls -d */", path)
+	} else {
+		cmd = fmt.Sprintf("cd %s; ls ", path)
+	}
+	stdout, err := ExecuteCommandOnPod(deployment, podName, cmd)
+	if err != nil {
+		return nil, err
+	}
+	return strings.Fields(stdout), err
+}
+
+// CompareStringSlices checks if two string slices match, ignoring order.
+// Note: both slices are sorted in place.
+func CompareStringSlices(stringOne []string, stringTwo []string) bool {
+	if len(stringOne) != len(stringTwo) {
+		return false
+	}
+	sort.Strings(stringOne)
+	sort.Strings(stringTwo)
+	return reflect.DeepEqual(stringOne, stringTwo)
+}
+
+// CheckStringInSlice checks if a string is present in a slice
+func CheckStringInSlice(stringSlice []string, compString string) bool {
+	for _, item := range stringSlice {
+		if item == compString {
+			return true
+		}
+	}
+	return false
+}
diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go
index cf846210d..96d067458 100644
--- a/test/testenv/verificationutils.go
+++ b/test/testenv/verificationutils.go
@@ -24,7 +24,7 @@ import (
 
 	gomega "github.com/onsi/gomega"
 
-	enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1"
+	enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2"
 	splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 )
@@ -60,7 +60,7 @@ type PodDetailsStruct struct {
 }
 
 // StandaloneReady verify Standlone is in ReadyStatus and does not flip-flop
-func StandaloneReady(deployment *Deployment, deploymentName string, standalone *enterprisev1.Standalone, testenvInstance *TestEnv) {
+func StandaloneReady(deployment *Deployment, deploymentName string, standalone *enterpriseApi.Standalone, testenvInstance *TestEnv) {
 	gomega.Eventually(func() splcommon.Phase {
 		err := deployment.GetInstance(deploymentName, standalone)
 		if err != nil {
@@ -80,7 +80,7 @@
 
 // SearchHeadClusterReady verify SHC is in READY status and does not flip-flop
 func SearchHeadClusterReady(deployment *Deployment, testenvInstance *TestEnv) {
-	shc := &enterprisev1.SearchHeadCluster{}
+	shc := &enterpriseApi.SearchHeadCluster{}
 	instanceName := fmt.Sprintf("%s-shc", deployment.GetName())
 	gomega.Eventually(func() splcommon.Phase {
 		err := deployment.GetInstance(instanceName, shc)
@@ -92,6 +92,26 @@ func SearchHeadClusterReady(deployment *Deployment, testenvInstance *TestEnv) {
 		return shc.Status.Phase
 	}, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(splcommon.PhaseReady))
 
+	gomega.Eventually(func() splcommon.Phase {
+		err := deployment.GetInstance(instanceName, shc)
+		if err != nil {
+			return splcommon.PhaseError
+		}
+		testenvInstance.Log.Info("Waiting for Deployer STATUS to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.DeployerPhase)
+		DumpGetPods(testenvInstance.GetName())
+
return shc.Status.DeployerPhase + }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(splcommon.PhaseReady)) + + gomega.Eventually(func() splcommon.Phase { + err := deployment.GetInstance(instanceName, shc) + if err != nil { + return splcommon.PhaseError + } + testenvInstance.Log.Info("Waiting for search head cluster STATUS to be ready", "instance", shc.ObjectMeta.Name, "Phase", shc.Status.Phase) + DumpGetPods(testenvInstance.GetName()) + return shc.Status.Phase + }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(splcommon.PhaseReady)) + // In a steady state, we should stay in Ready and not flip-flop around gomega.Consistently(func() splcommon.Phase { _ = deployment.GetInstance(deployment.GetName(), shc) @@ -101,7 +121,7 @@ func SearchHeadClusterReady(deployment *Deployment, testenvInstance *TestEnv) { // SingleSiteIndexersReady verify single site indexers go to ready state func SingleSiteIndexersReady(deployment *Deployment, testenvInstance *TestEnv) { - idc := &enterprisev1.IndexerCluster{} + idc := &enterpriseApi.IndexerCluster{} instanceName := fmt.Sprintf("%s-idxc", deployment.GetName()) gomega.Eventually(func() splcommon.Phase { err := deployment.GetInstance(instanceName, idc) @@ -123,7 +143,7 @@ func SingleSiteIndexersReady(deployment *Deployment, testenvInstance *TestEnv) { // ClusterMasterReady verify Cluster Master Instance is in ready status func ClusterMasterReady(deployment *Deployment, testenvInstance *TestEnv) { // Ensure that the cluster-master goes to Ready phase - cm := &enterprisev1.ClusterMaster{} + cm := &enterpriseApi.ClusterMaster{} gomega.Eventually(func() splcommon.Phase { err := deployment.GetInstance(deployment.GetName(), cm) if err != nil { @@ -150,7 +170,7 @@ func IndexersReady(deployment *Deployment, testenvInstance *TestEnv, siteCount i instanceName := fmt.Sprintf("%s-%s", deployment.GetName(), siteName) siteIndexerMap[siteName] = []string{fmt.Sprintf("splunk-%s-indexer-0", instanceName)} // Ensure indexers go to Ready phase - idc := &enterprisev1.IndexerCluster{} + idc := &enterpriseApi.IndexerCluster{} gomega.Eventually(func() splcommon.Phase { err := deployment.GetInstance(instanceName, idc) if err != nil { @@ -229,7 +249,7 @@ func VerifyNoSHCInNamespace(deployment *Deployment, testenvInstance *TestEnv) { // LicenseMasterReady verify LM is in ready status and does not flip flop func LicenseMasterReady(deployment *Deployment, testenvInstance *TestEnv) { - licenseMaster := &enterprisev1.LicenseMaster{} + licenseMaster := &enterpriseApi.LicenseMaster{} testenvInstance.Log.Info("Verifying License Master becomes READY") gomega.Eventually(func() splcommon.Phase { @@ -293,7 +313,7 @@ func VerifyIndexConfigsMatch(deployment *Deployment, podName string, indexName s gomega.Consistently(func() bool { indexFound, data := GetIndexOnPod(deployment, podName, indexName) logf.Log.Info("Checking status of index on pod", "PODNAME", podName, "INDEX NAME", indexName, "STATUS", indexFound) - if indexFound == true { + if indexFound { if data.Content.MaxGlobalDataSizeMB == maxGlobalDataSizeMB && data.Content.MaxGlobalRawDataSizeMB == maxGlobalRawDataSizeMB { logf.Log.Info("Checking index configs", "MaxGlobalDataSizeMB", data.Content.MaxGlobalDataSizeMB, "MaxGlobalRawDataSizeMB", data.Content.MaxGlobalRawDataSizeMB) return true @@ -346,7 +366,7 @@ func VerifyConfOnPod(deployment *Deployment, namespace string, podName string, c // VerifySearchHeadClusterPhase verify the phase of SHC matches given phase func VerifySearchHeadClusterPhase(deployment *Deployment, 
testenvInstance *TestEnv, phase splcommon.Phase) { gomega.Eventually(func() splcommon.Phase { - shc := &enterprisev1.SearchHeadCluster{} + shc := &enterpriseApi.SearchHeadCluster{} shcName := deployment.GetName() + "-shc" err := deployment.GetInstance(shcName, shc) if err != nil { @@ -361,7 +381,7 @@ func VerifySearchHeadClusterPhase(deployment *Deployment, testenvInstance *TestE // VerifyIndexerClusterPhase verify the phase of idxc matches the given phase func VerifyIndexerClusterPhase(deployment *Deployment, testenvInstance *TestEnv, phase splcommon.Phase, idxcName string) { gomega.Eventually(func() splcommon.Phase { - idxc := &enterprisev1.IndexerCluster{} + idxc := &enterpriseApi.IndexerCluster{} err := deployment.GetInstance(idxcName, idxc) if err != nil { return splcommon.PhaseError @@ -375,7 +395,7 @@ func VerifyIndexerClusterPhase(deployment *Deployment, testenvInstance *TestEnv, // VerifyStandalonePhase verify the phase of Standalone CR func VerifyStandalonePhase(deployment *Deployment, testenvInstance *TestEnv, crName string, phase splcommon.Phase) { gomega.Eventually(func() splcommon.Phase { - standalone := &enterprisev1.Standalone{} + standalone := &enterpriseApi.Standalone{} err := deployment.GetInstance(deployment.GetName(), standalone) if err != nil { return splcommon.PhaseError @@ -415,7 +435,7 @@ func VerifyCPULimits(deployment *Deployment, ns string, podName string, expected // VerifyClusterMasterPhase verify phase of cluster master func VerifyClusterMasterPhase(deployment *Deployment, testenvInstance *TestEnv, phase splcommon.Phase) { - cm := &enterprisev1.ClusterMaster{} + cm := &enterpriseApi.ClusterMaster{} gomega.Eventually(func() splcommon.Phase { err := deployment.GetInstance(deployment.GetName(), cm) if err != nil { @@ -558,3 +578,89 @@ func VerifyPVCsPerDeployment(deployment *Deployment, testenvInstance *TestEnv, d } } } + +// VerifyAppInstalled verify that app of specific version is installed. 
The method assumes the app is installed on all CRs in the namespace
+func VerifyAppInstalled(deployment *Deployment, testenvInstance *TestEnv, ns string, pods []string, apps []string, versionCheck bool, statusCheck string, checkupdated bool, clusterWideInstall bool) {
+	for _, podName := range pods {
+		if !strings.Contains(podName, "monitoring-console") {
+			for _, appName := range apps {
+				status, versionInstalled, err := GetPodAppStatus(deployment, podName, ns, appName, clusterWideInstall)
+				logf.Log.Info("App info returned for app", "App-name", appName, "status", status, "versionInstalled", versionInstalled, "error", err)
+				gomega.Expect(err).To(gomega.Succeed(), "Unable to get app status on pod")
+				comparison := strings.EqualFold(status, statusCheck)
+				// Check that the app is installed on specific pods and uninstalled on others for a cluster-wide install
+				if clusterWideInstall {
+					if strings.Contains(podName, "-indexer-") || strings.Contains(podName, "-search-head-") {
+						gomega.Expect(comparison).Should(gomega.Equal(true))
+					}
+				} else {
+					// For a local install, check pods individually
+					if strings.Contains(podName, "-indexer-") || strings.Contains(podName, "-search-head-") {
+						gomega.Expect(comparison).Should(gomega.Equal(false))
+					} else {
+						gomega.Expect(comparison).Should(gomega.Equal(true))
+					}
+				}
+				if versionCheck {
+					// For a cluster-wide install, do not check versions on the deployer and cluster-master, as the apps aren't installed there
+					if !(clusterWideInstall && (strings.Contains(podName, "-deployer-") || strings.Contains(podName, "-cluster-master-"))) {
+						if checkupdated {
+							gomega.Expect(versionInstalled).Should(gomega.Equal(AppInfo[appName]["V2"]))
+						} else {
+							gomega.Expect(versionInstalled).Should(gomega.Equal(AppInfo[appName]["V1"]))
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+// VerifyAppsCopied verify that apps are copied to the correct location based on pod
+func VerifyAppsCopied(deployment *Deployment, testenvInstance *TestEnv, ns string, pods []string, apps []string, checkAppDirectory bool, clusterWideInstall bool) {
+	for _, podName := range pods {
+		if !strings.Contains(podName, "monitoring-console") {
+			path := "etc/apps"
+			// For a cluster-wide install the apps are extracted to different locations
+			if clusterWideInstall {
+				if strings.Contains(podName, "cluster-master") {
+					path = "etc/master-apps/"
+				} else if strings.Contains(podName, "-deployer-") {
+					path = "etc/shcluster/apps"
+				} else if strings.Contains(podName, "-indexer-") {
+					path = "etc/slave-apps/"
+				}
+			}
+			VerifyAppsInFolder(deployment, testenvInstance, ns, podName, apps, path, checkAppDirectory)
+		}
+	}
+}
+
+// VerifyAppsInFolder verify that apps are present in folder
+func VerifyAppsInFolder(deployment *Deployment, testenvInstance *TestEnv, ns string, podName string, apps []string, path string, checkAppDirectory bool) {
+	gomega.Eventually(func() bool {
+		appList, err := GetDirsOrFilesInPath(deployment, podName, path, checkAppDirectory)
+		gomega.Expect(err).To(gomega.Succeed(), "Unable to get apps on pod", "Pod", podName)
+		for _, app := range apps {
+			folderName := app + "/"
+			found := CheckStringInSlice(appList, folderName)
+			logf.Log.Info("Copy Status for app", "App-name", folderName, "status", found)
+			if found != checkAppDirectory {
+				return false
+			}
+		}
+		return true
+	}, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
+}
+
+// VerifyAppsDownloadedByInitContainer verify that apps are downloaded by init container
+func VerifyAppsDownloadedByInitContainer(deployment *Deployment, testenvInstance *TestEnv, ns
string, pods []string, apps []string, path string) { + for _, podName := range pods { + appList, err := GetDirsOrFilesInPath(deployment, podName, path, false) + gomega.Expect(err).To(gomega.Succeed(), "Unable to get apps on pod", "Pod", podName) + for _, app := range apps { + found := CheckStringInSlice(appList, app) + gomega.Expect(found).Should(gomega.Equal(true)) + } + } +} diff --git a/version/version.go b/version/version.go index d0d3c1ded..658904853 100644 --- a/version/version.go +++ b/version/version.go @@ -16,5 +16,5 @@ package version var ( // Version of splunk-operator - Version = "1.0.1" + Version = "1.0.2" )
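
A few usage sketches for the new test helpers follow; none of this is part of the patch itself. First, GetAppFileList and VerifyAppInstalled above index an AppInfo map that this diff does not show. A minimal shape consistent with the "V1"/"V2" and "V%dfilename" lookups, with a purely hypothetical app entry, would be:

package example

import "fmt"

// AppInfo maps an app name to its version strings ("V1", "V2") and the
// versioned package file names ("V1filename", "V2filename") that the test
// helpers look up. The entry below is a hypothetical example, not real test data.
var AppInfo = map[string]map[string]string{
	"Splunk_SA_CIM": {
		"V1": "4.18.1", "V1filename": "splunk-common-information-model-cim_4181.tgz",
		"V2": "4.19.0", "V2filename": "splunk-common-information-model-cim_4190.tgz",
	},
}

// appFileList resolves the versioned package names for a list of apps,
// mirroring the GetAppFileList helper added in this change.
func appFileList(appList []string, version int) []string {
	fileKey := fmt.Sprintf("V%dfilename", version) // e.g. "V1filename"
	files := make([]string, 0, len(appList))
	for _, app := range appList {
		files = append(files, AppInfo[app][fileKey])
	}
	return files
}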
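
Next, a sketch of how a test might stage app packages with the new S3 helpers: pull the packages from a shared test-data bucket, push them under a test-specific prefix that the app framework will poll, then list them back. The bucket names, prefixes, and package names here are hypothetical placeholders.

package example

import (
	"github.com/splunk/splunk-operator/test/testenv"
)

// stageAppsOnS3 copies hypothetical app packages from a shared test-data
// bucket into a per-test prefix and returns the object names found there.
func stageAppsOnS3() ([]string, error) {
	downloadDir := "apps-download"                 // local scratch directory
	apps := []string{"app-one.tgz", "app-two.tgz"} // hypothetical packages

	// Pull the packages from the shared test-data bucket into the local dir.
	if err := testenv.DownloadFilesFromS3("test-data-bucket", "appframework/v1apps", downloadDir, apps); err != nil {
		return nil, err
	}
	// Push them to a test-specific prefix on the test bucket.
	if _, err := testenv.UploadFilesToS3("test-bucket", "appframework-test", apps, downloadDir); err != nil {
		return nil, err
	}
	// Confirm the objects landed under that prefix; GetFilesInPathOnS3
	// strips the path prefix from the returned keys.
	return testenv.GetFilesInPathOnS3("test-bucket", "appframework-test/"), nil
}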
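
Finally, a sketch of driving DeploySingleSiteClusterWithGivenAppFrameworkSpec with an app repo configuration. The VolumeSpec field names match GenerateIndexVolumeSpec in this change, but the AppFrameworkSpec and AppSourceSpec field names (VolList, AppSources, AppsRepoPollInterval, Name, Location) and all endpoint/secret values are assumptions to check against the v2 API types; binding an app source to a volume and scope is omitted because those Go field names are not shown in this diff.

package example

import (
	enterpriseApi "github.com/splunk/splunk-operator/pkg/apis/enterprise/v2"
	"github.com/splunk/splunk-operator/test/testenv"
)

// deploySingleSiteWithApps deploys CM + indexers (+ SHC) with a hypothetical
// app repo on S3, mirroring what an app framework integration test would do.
func deploySingleSiteWithApps(deployment *testenv.Deployment) error {
	appFramework := enterpriseApi.AppFrameworkSpec{
		// Field names below are assumptions based on the CRD schema in this patch.
		AppsRepoPollInterval: 60, // seconds; the CRD docs clamp this to [60, 86400]
		VolList: []enterpriseApi.VolumeSpec{{
			Name:      "appframework-test-volume",
			Endpoint:  "https://s3-us-west-2.amazonaws.com", // hypothetical region endpoint
			Path:      "test-bucket/appframework-test/",     // hypothetical bucket/prefix
			SecretRef: "s3-secret",                          // hypothetical secret with S3 creds
			Provider:  "aws",
			Type:      "s3",
		}},
		AppSources: []enterpriseApi.AppSourceSpec{{
			Name:     "appframework-test-apps",
			Location: "appframework-test/", // relative to the volume path
		}},
	}
	// 3 indexers, with an SHC, and a 2-minute liveness/readiness delay.
	return deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec("appfw-test", 3, true, appFramework, 120)
}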