From 0920815852c6d9cf90755c84ee4e8633c12416d0 Mon Sep 17 00:00:00 2001 From: mgaldino Date: Thu, 2 Sep 2021 11:48:40 -0700 Subject: [PATCH 1/2] Bias-language removal P1 Target: Comments and documentation Not changed: Urls, paths, examples --- docs/AppFramework.md | 22 +++---- docs/ChangeLog.md | 10 +-- docs/Examples.md | 46 +++++++------- docs/Ingress.md | 2 +- docs/MultisiteExamples.md | 10 +-- docs/SmartStore.md | 6 +- docs/SplunkOperatorUpgrade.md | 2 +- pkg/apis/enterprise/v2/clustermaster_types.go | 4 +- pkg/apis/enterprise/v2/common_types.go | 4 +- .../enterprise/v2/indexercluster_types.go | 8 +-- pkg/apis/enterprise/v2/licensemaster_types.go | 12 ++-- pkg/splunk/client/enterprise.go | 50 +++++++-------- pkg/splunk/common/messages.go | 2 +- pkg/splunk/enterprise/clustermaster.go | 16 ++--- pkg/splunk/enterprise/configuration.go | 10 +-- pkg/splunk/enterprise/indexercluster.go | 12 ++-- pkg/splunk/enterprise/licensemaster.go | 4 +- pkg/splunk/enterprise/licensemaster_test.go | 2 +- pkg/splunk/enterprise/monitoringconsole.go | 6 +- pkg/splunk/enterprise/searchheadcluster.go | 2 +- pkg/splunk/enterprise/types.go | 6 +- pkg/splunk/enterprise/util.go | 2 +- pkg/splunk/enterprise/util_test.go | 4 +- test/README.md | 2 +- test/c3/appframework/appframework_test.go | 8 +-- .../custom_resource_crud_c3_test.go | 10 +-- .../custom_resource_crud_m4_test.go | 2 +- test/delete_cr/deletecr_test.go | 2 +- test/env.sh | 2 +- test/licensemaster/lm_c3_test.go | 2 +- test/licensemaster/lm_m4_test.go | 2 +- test/licensemaster/lm_s1_test.go | 4 +- test/m4/appframework/appframework_test.go | 6 +- .../monitoring_console_test.go | 2 +- test/scaling_test/scaling_test.go | 10 +-- test/secret/secret_c3_test.go | 10 +-- test/secret/secret_m4_test.go | 10 +-- test/secret/secret_s1_test.go | 14 ++--- test/smartstore/smartstore_test.go | 2 +- test/smoke/cluster_master_sites_response.go | 2 +- test/smoke/smoke_test.go | 8 +-- test/testenv/appframework_utils.go | 2 +- test/testenv/cmutil.go | 
10 +-- test/testenv/deployment.go | 62 +++++++++---------- test/testenv/util.go | 4 +- test/testenv/verificationutils.go | 12 ++-- 46 files changed, 215 insertions(+), 215 deletions(-) diff --git a/docs/AppFramework.md b/docs/AppFramework.md index d2b8d5f34..fc87d36f9 100644 --- a/docs/AppFramework.md +++ b/docs/AppFramework.md @@ -26,7 +26,7 @@ In this example, you'll deploy a Standalone CR with a remote storage volume, the * In this example, the Splunk Apps are located at `bucket-app-framework-us-west-2/Standalone-us/networkAppsLoc/` and `bucket-app-framework-us-west-2/Standalone-us/authAppsLoc/`, and are both accessible through the end point `https://s3-us-west-2.amazonaws.com`. 5. Update the standalone CR specification and append the volume, App Source configuration, and scope. - * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will run the apps locally, set the `scope: local ` The Standalone and License Master CRs always use a local scope. + * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CRs where the Splunk Enterprise instance will run the apps locally, set the `scope: local ` The Standalone and License Manager CRs always use a local scope. Example: Standalone.yaml @@ -62,28 +62,28 @@ spec: The App Framework detects the Splunk App archive files available in the App Source locations, and deploys the apps to the standalone instance for local use. The App Framework will also scan for changes to the App Source folders based on the polling interval, and deploy updated archives to the instance. A Pod reset is triggered to install the new or modified apps. -Note: A similar approach can be used for installing apps on License Master using it's own CR. +Note: A similar approach can be used for installing apps on License Manager using it's own CR. 
For more information, see the [Description of App Framework Specification fields](#description-of-app-framework-specification-fields). ### How to use the App Framework on Indexer Cluster -This example describes the installation of apps on Indexer Cluster as well as Cluster Master. This is achieved by deploying a ClusterMaster CR with a remote storage volume, the location of the app archives, and set the installation scope to support both local and cluster app distribution. +This example describes the installation of apps on Indexer Cluster as well as Cluster Manager. This is achieved by deploying a ClusterMaster CR with a remote storage volume, the location of the app archives, and set the installation scope to support both local and cluster app distribution. 1. Confirm your S3-based remote storage volume path and URL. 2. Create a Kubernetes Secret Object with the storage credentials. * Example: `kubectl create secret generic s3-secret --from-literal=s3_access_key=AKIAIOSFODNN7EXAMPLE --from-literal=s3_secret_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` 3. Create folders on the remote storage volume to use as App Source locations. - * An App Source is a folder on the remote storage volume containing a subset of Splunk Apps and Add-ons. In this example, we have Splunk apps that are installed and run locally on the cluster master, and apps that will be distributed to all cluster peers by the cluster master. - * The apps are split across 3 folders named `networkApps`, `clusterBase`, and `adminApps` . The apps placed into `networkApps` and `clusterBase` are distributed to the cluster peers, but the apps in `adminApps` are for local use on the cluster master instance only. + * An App Source is a folder on the remote storage volume containing a subset of Splunk Apps and Add-ons. In this example, we have Splunk apps that are installed and run locally on the cluster manager, and apps that will be distributed to all cluster peers by the cluster manager. 
+ * The apps are split across 3 folders named `networkApps`, `clusterBase`, and `adminApps` . The apps placed into `networkApps` and `clusterBase` are distributed to the cluster peers, but the apps in `adminApps` are for local use on the cluster manager instance only. 4. Copy your Splunk App or Add-on archive files to the App Source. - * In this example, the Splunk Apps for the cluster peers are located at `bucket-app-framework-us-west-2/idxcAndCmApps/networkAppsLoc/`, `bucket-app-framework-us-west-2/idxcAndCmApps/clusterBaseLoc/`, and the apps for the cluster master are located at`bucket-app-framework-us-west-2/idxcAndCmApps/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com`. + * In this example, the Splunk Apps for the cluster peers are located at `bucket-app-framework-us-west-2/idxcAndCmApps/networkAppsLoc/`, `bucket-app-framework-us-west-2/idxcAndCmApps/clusterBaseLoc/`, and the apps for the cluster manager are located at`bucket-app-framework-us-west-2/idxcAndCmApps/adminAppsLoc/`. They are all accessible through the end point `https://s3-us-west-2.amazonaws.com`. 5. Update the ClusterMaster CR specification and append the volume, App Source configuration, and scope. * The scope determines where the apps and add-ons are placed into the Splunk Enterprise instance. For CR's where the Splunk Enterprise instance will deploy the apps to cluster peers, set the `scope: cluster`. The ClusterMaster and SearchHeadCluster CR's support both cluster and local scopes. - * In this example, the cluster master will run some apps locally, and deploy other apps to the cluster peers. The App Source folder `adminApps` are Splunk Apps that are installed on the cluster master, and will use a local scope. The apps in the App Source folders `networkApps` and `clusterBase` will be deployed from the cluster master to the peers, and will use a cluster scope. 
+ * In this example, the cluster manager will run some apps locally, and deploy other apps to the cluster peers. The App Source folder `adminApps` are Splunk Apps that are installed on the cluster manager, and will use a local scope. The apps in the App Source folders `networkApps` and `clusterBase` will be deployed from the cluster manager to the peers, and will use a cluster scope. Example: ClusterMaster.yaml @@ -120,9 +120,9 @@ spec: 6. Apply the Custom Resource specification: `kubectl apply -f ClusterMaster.yaml` -The App Framework detects the Splunk App archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the cluster master instance for local use. A Pod reset is triggered on the cluster master to install any new or modified apps. The App Framework will also scan for changes to the App Source folders based on the polling interval, and deploy updated archives to the instance. +The App Framework detects the Splunk App archive files available in the App Source locations, and deploys the apps from the `adminApps` folder to the cluster manager instance for local use. A Pod reset is triggered on the cluster manager to install any new or modified apps. The App Framework will also scan for changes to the App Source folders based on the polling interval, and deploy updated archives to the instance. -The apps in the `networkApps` and `clusterBase` folders are deployed to the cluster master for use on the cluster. The cluster master is responsible for deploying those apps to the cluster peers. The Splunk cluster peer restarts are triggered by the contents of the Splunk apps deployed, and are not initiated by the App Framework. +The apps in the `networkApps` and `clusterBase` folders are deployed to the cluster manager for use on the cluster. The cluster manager is responsible for deploying those apps to the cluster peers. 
The Splunk cluster peer restarts are triggered by the contents of the Splunk apps deployed, and are not initiated by the App Framework. For more information, see the [Description of App Framework Specification fields](#description-of-app-framework-specification-fields) @@ -320,7 +320,7 @@ Here is a typical App framework configuration in a Custom resource definition: * Splunk Operator CRDs support the configuration of [initialDelaySeconds](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) for both Liveliness (livenessInitialDelaySeconds) and Readiness (readinessInitialDelaySeconds) probes * When App Framework is NOT configured, default values are 300 seconds for livenessInitialDelaySeconds and 10 seconds for readinessInitialDelaySeconds (for all CRs) -* When App Framework is configured, default values are 1800 seconds for livenessInitialDelaySeconds and 10 seconds for readinessInitialDelaySeconds (only for Deployer, Cluster Master, Standalone and License Master CRs). The higher value of livenessInitialDelaySeconds is to ensure sufficient time is allocated for installing most apps. This configuration can further be managed depending on the number & size of Apps to be installed +* When App Framework is configured, default values are 1800 seconds for livenessInitialDelaySeconds and 10 seconds for readinessInitialDelaySeconds (only for Deployer, Cluster Manager, Standalone and License Manager CRs). The higher value of livenessInitialDelaySeconds is to ensure sufficient time is allocated for installing most apps. This configuration can further be managed depending on the number & size of Apps to be installed ## App Framework Limitations @@ -330,4 +330,4 @@ The App Framework does not review, preview, analyze, or enable Splunk Apps and A 2. The App Framework tracks the app installation state per CR. 
Whenever you scale up a Standalone CR, all the existing pods will recycle and all the apps in app sources will be re-installed. This is done so that the new replica(s) can install all the apps and not just the apps that were changed recently. -3. When a change in the App Repo is detected by the App Framework, a pod reset is initiated to install the new or modified applications. For the ClusterMaster and SearchHeadCluster CR’s, a pod reset is applied to the cluster master and deployer instances only. A cluster peer restart might be triggered by the contents of the Splunk apps deployed, but are not initiated by the App Framework. \ No newline at end of file +3. When a change in the App Repo is detected by the App Framework, a pod reset is initiated to install the new or modified applications. For the ClusterMaster and SearchHeadCluster CR’s, a pod reset is applied to the cluster manager and deployer instances only. A cluster peer restart might be triggered by the contents of the Splunk apps deployed, but are not initiated by the App Framework. \ No newline at end of file diff --git a/docs/ChangeLog.md b/docs/ChangeLog.md index 61f4a6d7c..a0fdff5cc 100644 --- a/docs/ChangeLog.md +++ b/docs/ChangeLog.md @@ -45,7 +45,7 @@ * System resources & Storage requirements * How to Upgrade Splunk Operator * Ingress documentation updates with ngingx examples - * How to configure Indexer cluster to use License Master + * How to configure Indexer cluster to use License Manager * Nightly build pipeline enhanced to run on EKS Cluster @@ -117,7 +117,7 @@ * Introduction of SmartStore Index management feature. With this update, SmartStore-enabled Indexes can be configured through Custom resources. For more details, refer to [SmartStore.md](SmartStore.md) -* Added support for deployment of Multi-site Indexer Cluster. This release introduces a new ClusterMaster Custom Resource, thus allowing the Cluster Master to have it's own resource specifications. 
Further, the ClusterMaster & IndexerCluster Custom Resources can together be used to configure both Single & Multi-site Indexer clusters. For more details see [Examples.md](Examples.md) & [MultisiteExamples.md](MultisiteExamples.md) +* Added support for deployment of Multi-site Indexer Cluster. This release introduces a new ClusterMaster Custom Resource, thus allowing the Cluster Manager to have its own resource specifications. Further, the ClusterMaster & IndexerCluster Custom Resources can together be used to configure both Single & Multi-site Indexer clusters. For more details see [Examples.md](Examples.md) & [MultisiteExamples.md](MultisiteExamples.md) * Feature to automatically add a configured Monitoring Console pod within a namespace. With this release, a Monitoring Console pod is automatically configured & also has the ability to reconfigure itself based on the changes within the namespace. For more details, refer to [Examples.md](Examples.md) @@ -160,7 +160,7 @@ to any corresponding Pod and Service objects that the operator creates. * A unique pass4SymmKey secret will now be randomly generated, to resolve - cluster master warnings about using the default value. + cluster manager warnings about using the default value. * Integrated with CircleCI and Coverall for CICD and code coverage, and added a bunch of unit tests to bring coverage up to over 90%. 
@@ -198,7 +198,7 @@ ## 0.0.5 Alpha (2019-10-31) * Added port 8088 to expose on indexers, and only exposting DFC ports on search heads -* Bug fix: The spark-master deployment was always updated during reconciliation +* Bug fix: The spark-master deployment was always updated during reconciliation ## 0.0.4 Alpha (2019-10-22) @@ -210,7 +210,7 @@ ## 0.0.3 Alpha (2019-08-14) -* Switched single instances, deployer, cluster master and license master +* Switched single instances, deployer, cluster manager and license manager from using Deployment to StatefulSet ## 0.0.2 & 0.0.1 diff --git a/docs/Examples.md b/docs/Examples.md index 1d80f276f..afb46b327 100644 --- a/docs/Examples.md +++ b/docs/Examples.md @@ -6,7 +6,7 @@ deployments. - [Creating a Clustered Deployment](#creating-a-clustered-deployment) - [Indexer Clusters](#indexer-clusters) - - [Cluster Master](#cluster-master) + - [Cluster Manager](#cluster-manager) - [Indexer part](#indexer-part) - [Search Head Clusters](#search-head-clusters) - [Cluster Services](#cluster-services) @@ -16,9 +16,9 @@ 
- [Installing Splunk Apps](#installing-splunk-apps) - [Using Apps for Splunk Configuration](#using-apps-for-splunk-configuration) - [Creating a LicenseMaster Using a ConfigMap](#creating-a-licensemaster-using-a-configmap) - - [Configuring Standalone to use License Master](#configuring-standalone-to-use-license-master) - - [Configuring Indexer Clusters to use License Master](#configuring-indexer-clusters-to-use-license-master) - - [Using an External License Master](#using-an-external-license-master) + - [Configuring Standalone to use License Manager](#configuring-standalone-to-use-license-manager) + - [Configuring Indexer Clusters to use License Manager](#configuring-indexer-clusters-to-use-license-manager) + - [Using an External License Manager](#using-an-external-license-manager) - [Using an External Indexer Cluster](#using-an-external-indexer-cluster) - [Managing global kubernetes secret object](#managing-global-kubernetes-secret-object) - [Creating global kubernetes secret object](#creating-global-kubernetes-secret-object) @@ -51,9 +51,9 @@ The passwords for the instance are generated automatically. To review the passwo When growing, customers will typically want to first expand by upgrading to an [indexer cluster](https://docs.splunk.com/Documentation/Splunk/latest/Indexer/Aboutindexesandindexers). -The Splunk Operator makes creation of an indexer cluster as easy as creating a `ClusterMaster` resource for Cluster Master and an `IndexerCluster` resource for indexers part respectively: +The Splunk Operator makes creation of an indexer cluster as easy as creating a `ClusterMaster` resource for Cluster Manager and an `IndexerCluster` resource for indexers part respectively: -#### Cluster Master +#### Cluster Manager ```yaml cat < #### Multiple Hosts and HEC Configuration -If your deployment has multiple hosts such as Search Heads and Cluster Master, use this example to configure Splunk Web access, and HTTP Event Collector port. 
Follow the steps here [HEC Documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) to learn how to create a HEC token and how to send data using HTTP. +If your deployment has multiple hosts such as Search Heads and Cluster Manager, use this example to configure Splunk Web access, and HTTP Event Collector port. Follow the steps here [HEC Documentation](https://docs.splunk.com/Documentation/Splunk/latest/Data/UsetheHTTPEventCollector) to learn how to create a HEC token and how to send data using HTTP. 1. Create a Gateway for multiple hosts. diff --git a/docs/MultisiteExamples.md b/docs/MultisiteExamples.md index 2034d6648..2988d7580 100644 --- a/docs/MultisiteExamples.md +++ b/docs/MultisiteExamples.md @@ -5,7 +5,7 @@ This document provides examples to configure a multisite cluster using the splun - [Multisite Indexer Clusters in Kubernetes](#multisite-indexer-clusters-in-kubernetes) - [Multipart IndexerCluster](#multipart-indexercluster) - - [Deploy the cluster-master](#deploy-the-cluster-master) + - [Deploy the cluster-manager](#deploy-the-cluster-manager) - [Deploy the indexer sites](#deploy-the-indexer-sites) - [Connecting a search-head cluster to a multisite indexer-cluster](#connecting-a-search-head-cluster-to-a-multisite-indexer-cluster) @@ -38,11 +38,11 @@ configured with a hardcoded site. Advantages: - some operations are performed per site which mitigates the risk of impact on the whole cluster (e.g. Splunk upgrades, scaling up resources) -- specific indexer services are created per site allowing to send events to the indexers located in the same zone, avoiding possible cost of cross-zone traffic. Indexer discovery from cluster-master can do this for forwarders, but this solution also covers http/HEC traffic +- specific indexer services are created per site allowing to send events to the indexers located in the same zone, avoiding possible cost of cross-zone traffic. 
Indexer discovery from cluster-manager can do this for forwarders, but this solution also covers http/HEC traffic Limitation: all the IndexerCluster resources must be located in the same namespace -#### Deploy the cluster-master +#### Deploy the cluster-manager Note: the image version is defined in these resources as this allows to control the upgrade cycle @@ -126,10 +126,10 @@ https://docs.splunk.com/Documentation/Splunk/latest/DistSearch/DeploymultisiteSH for artifact replication, so mapping Splunk sites to Kubernetes zones is not relevant in that context. SearchHeadCluster resources can be connected to a multisite indexer cluster the same way as for single site. -The name of the IndexerCluster part containing the cluster master must be referenced in parameter `clusterMasterRef`. +The name of the IndexerCluster part containing the cluster manager must be referenced in parameter `clusterMasterRef`. Additional ansible default parameters must be set to activate multisite: -* `multisite_master`: which should reference the cluster-master service of the target indexer cluster +* `multisite_master`: which should reference the cluster-manager service of the target indexer cluster * `site`: which should in general be set to `site: site0` to disable search affinity ([documentation for more details] (https://docs.splunk.com/Documentation/Splunk/latest/DistSearch/DeploymultisiteSHC#Integrate_a_search_head_cluster_with_a_multisite_indexer_cluster)) diff --git a/docs/SmartStore.md b/docs/SmartStore.md index 284f4ec27..5fffb1920 100644 --- a/docs/SmartStore.md +++ b/docs/SmartStore.md @@ -69,7 +69,7 @@ Note: Custom apps with higher precedence can potentially overwrite the index and 1. Create a Secret object with Secret & Access credentials, as explained in [Storing SmartStore Secrets](#storing-smartstore-secrets) 2. Confirm your S3-based storage volume path and URL. 3. Confirm the name of the Splunk indexes being used with the SmartStore volume. -4. 
Create/Update the Cluster Master Customer Resource specification with volume and index configuration (see Example below) +4. Create/Update the Cluster Manager Custom Resource specification with volume and index configuration (see Example below) 5. Apply the Customer Resource specification: kubectl -f apply Clustermaster.yaml 6. Follow the rest of the steps to Create an Indexer Cluster. See [Examples](Examples.md) @@ -103,8 +103,8 @@ spec: ``` -The SmartStore parameters will be placed into the required .conf files in an app. The app is named as `splunk-operator`. In case of a Indexer cluster deployment, the app is located on Cluster master at `/opt/splunk/etc/master-apps/`. -Once the SmartStore configuration is populated to Cluster Master's `splunk-operator` app, Operator issues a bundle push command to Cluster Master, so that the SmartStore configuration is distributed to all the peers in that indexer cluster +The SmartStore parameters will be placed into the required .conf files in an app. The app is named as `splunk-operator`. In case of an Indexer cluster deployment, the app is located on Cluster Manager at `/opt/splunk/etc/master-apps/`. +Once the SmartStore configuration is populated to Cluster Manager's `splunk-operator` app, Operator issues a bundle push command to Cluster Manager, so that the SmartStore configuration is distributed to all the peers in that indexer cluster Note: Custom apps with higher precedence can potentially overwrite the index and volume configuration in the splunk-operator app. Hence, care should be taken to avoid conflicting SmartStore configuration in custom apps. 
See [Configuration file precedence order](https://docs.splunk.com/Documentation/Splunk/latest/Admin/Wheretofindtheconfigurationfiles#How_Splunk_determines_precedence_order) diff --git a/docs/SplunkOperatorUpgrade.md b/docs/SplunkOperatorUpgrade.md index 89a9ee440..fad841854 100644 --- a/docs/SplunkOperatorUpgrade.md +++ b/docs/SplunkOperatorUpgrade.md @@ -76,7 +76,7 @@ image: splunk/splunk:8.1.2 This is an example of the process followed by the Splunk Operator if the operator version is upgraded and a later Splunk Enterprise Docker image is available: ​ 1. A new Splunk Operator pod will be created, and the existing operator pod will be terminated. -2. Any existing License Master, Search Head, Deployer, ClusterMaster, Standalone pods will be terminated to be redeployed with the upgraded spec. +2. Any existing License Manager, Search Head, Deployer, ClusterMaster, Standalone pods will be terminated to be redeployed with the upgraded spec. 3. After a ClusterMaster pod is restarted, the Indexer Cluster pods which are connected to it are terminated and redeployed. 4. After all pods in the Indexer cluster and Search head cluster are redeployed, the Monitoring Console pod is terminated and redeployed. 
* Note: If there are multiple pods per Custom Resource, the pods are terminated and re-deployed in a descending order with the highest numbered pod going first diff --git a/pkg/apis/enterprise/v2/clustermaster_types.go b/pkg/apis/enterprise/v2/clustermaster_types.go index 345d46224..9fffdfe6f 100644 --- a/pkg/apis/enterprise/v2/clustermaster_types.go +++ b/pkg/apis/enterprise/v2/clustermaster_types.go @@ -40,7 +40,7 @@ type ClusterMasterSpec struct { // ClusterMasterStatus defines the observed state of ClusterMaster type ClusterMasterStatus struct { - // current phase of the cluster master + // current phase of the cluster manager Phase splcommon.Phase `json:"phase"` // selector for pods, used by HorizontalPodAutoscaler @@ -69,7 +69,7 @@ type BundlePushInfo struct { // ClusterMaster is the Schema for the clustermasters API // +kubebuilder:subresource:status -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of cluster master" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of cluster manager" // +kubebuilder:resource:path=clustermasters,scope=Namespaced,shortName=cm-idxc // +kubebuilder:storageversion type ClusterMaster struct { diff --git a/pkg/apis/enterprise/v2/common_types.go b/pkg/apis/enterprise/v2/common_types.go index b10aa365b..2a2d485c9 100644 --- a/pkg/apis/enterprise/v2/common_types.go +++ b/pkg/apis/enterprise/v2/common_types.go @@ -93,13 +93,13 @@ type CommonSplunkSpec struct { // Full path or URL for one or more defaults.yml files specific // to App install, separated by commas. The defaults listed here // will be installed on the CM, standalone, search head deployer - // or license master instance. + // or license manager instance. 
DefaultsURLApps string `json:"defaultsUrlApps"` // Full path or URL for a Splunk Enterprise license file LicenseURL string `json:"licenseUrl"` - // LicenseMasterRef refers to a Splunk Enterprise license master managed by the operator within Kubernetes + // LicenseMasterRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes LicenseMasterRef corev1.ObjectReference `json:"licenseMasterRef"` // ClusterMasterRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes diff --git a/pkg/apis/enterprise/v2/indexercluster_types.go b/pkg/apis/enterprise/v2/indexercluster_types.go index 30971bbed..ca76199ce 100644 --- a/pkg/apis/enterprise/v2/indexercluster_types.go +++ b/pkg/apis/enterprise/v2/indexercluster_types.go @@ -47,7 +47,7 @@ type IndexerClusterMemberStatus struct { // Status of the indexer cluster peer Status string `json:"status"` - // The ID of the configuration bundle currently being used by the master. + // The ID of the configuration bundle currently being used by the manager. ActiveBundleID string `json:"active_bundle_id"` // Count of the number of buckets on this peer, across all indexes. @@ -62,7 +62,7 @@ type IndexerClusterStatus struct { // current phase of the indexer cluster Phase splcommon.Phase `json:"phase"` - // current phase of the cluster master + // current phase of the cluster manager ClusterMasterPhase splcommon.Phase `json:"clusterMasterPhase"` // desired number of indexer peers @@ -80,7 +80,7 @@ type IndexerClusterStatus struct { // Indicates if the cluster is ready for indexing. IndexingReady bool `json:"indexing_ready_flag"` - // Indicates whether the master is ready to begin servicing, based on whether it is initialized. + // Indicates whether the manager is ready to begin servicing, based on whether it is initialized. 
ServiceReady bool `json:"service_ready_flag"` // Indicates when the idxc_secret has been changed for a peer @@ -106,7 +106,7 @@ type IndexerClusterStatus struct { // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:resource:path=indexerclusters,scope=Namespaced,shortName=idc;idxc // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of indexer cluster" -// +kubebuilder:printcolumn:name="Master",type="string",JSONPath=".status.clusterMasterPhase",description="Status of cluster master" +// +kubebuilder:printcolumn:name="Manager",type="string",JSONPath=".status.clusterMasterPhase",description="Status of cluster manager" // +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of indexer peers" // +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready indexer peers" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of indexer cluster" diff --git a/pkg/apis/enterprise/v2/licensemaster_types.go b/pkg/apis/enterprise/v2/licensemaster_types.go index 033ac865a..e60cfea38 100644 --- a/pkg/apis/enterprise/v2/licensemaster_types.go +++ b/pkg/apis/enterprise/v2/licensemaster_types.go @@ -28,7 +28,7 @@ import ( // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html // see also https://book.kubebuilder.io/reference/markers/crd.html -// LicenseMasterSpec defines the desired state of a Splunk Enterprise license master. +// LicenseMasterSpec defines the desired state of a Splunk Enterprise license manager. 
type LicenseMasterSpec struct { CommonSplunkSpec `json:",inline"` @@ -36,9 +36,9 @@ type LicenseMasterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` } -// LicenseMasterStatus defines the observed state of a Splunk Enterprise license master. +// LicenseMasterStatus defines the observed state of a Splunk Enterprise license manager. type LicenseMasterStatus struct { - // current phase of the license master + // current phase of the license manager Phase splcommon.Phase `json:"phase"` // App Framework Context @@ -47,11 +47,11 @@ type LicenseMasterStatus struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// LicenseMaster is the Schema for a Splunk Enterprise license master. +// LicenseMaster is the Schema for a Splunk Enterprise license manager. // +kubebuilder:subresource:status // +kubebuilder:resource:path=licensemasters,scope=Namespaced,shortName=lm -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of license master" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of license master" +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of license manager" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of license manager" // +kubebuilder:storageversion type LicenseMaster struct { metav1.TypeMeta `json:",inline"` diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go index 8c0733561..b145784ca 100644 --- a/pkg/splunk/client/enterprise.go +++ b/pkg/splunk/client/enterprise.go @@ -371,7 +371,7 @@ type ClusterBundleInfo struct { Timestamp int64 `json:"timestamp"` } -// ClusterMasterInfo represents the status of the indexer cluster master. +// ClusterMasterInfo represents the status of the indexer cluster manager. 
// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Finfo type ClusterMasterInfo struct { // Indicates if the cluster is initialized. @@ -380,31 +380,31 @@ type ClusterMasterInfo struct { // Indicates if the cluster is ready for indexing. IndexingReady bool `json:"indexing_ready_flag"` - // Indicates whether the master is ready to begin servicing, based on whether it is initialized. + // Indicates whether the manager is ready to begin servicing, based on whether it is initialized. ServiceReady bool `json:"service_ready_flag"` // Indicates if the cluster is in maintenance mode. MaintenanceMode bool `json:"maintenance_mode"` - // Indicates whether the master is restarting the peers in a cluster. + // Indicates whether the manager is restarting the peers in a cluster. RollingRestart bool `json:"rolling_restart_flag"` - // The name for the master. Displayed in the Splunk Web manager page. + // The name for the manager. Displayed in the Splunk Web manager page. Label string `json:"label"` - // Provides information about the active bundle for this master. + // Provides information about the active bundle for this manager. ActiveBundle ClusterBundleInfo `json:"active_bundle"` - // The most recent information reflecting any changes made to the master-apps configuration bundle. + // The most recent information reflecting any changes made to the manager-apps configuration bundle. // In steady state, this is equal to active_bundle. If it is not equal, then pushing the latest bundle to all peers is in process (or needs to be started). LatestBundle ClusterBundleInfo `json:"latest_bundle"` - // Timestamp corresponding to the creation of the master. + // Timestamp corresponding to the creation of the manager. StartTime int64 `json:"start_time"` } -// GetClusterMasterInfo queries the cluster master for info about the indexer cluster. -// You can only use this on a cluster master. 
+// GetClusterMasterInfo queries the cluster manager for info about the indexer cluster. +// You can only use this on a cluster manager. // See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Finfo func (c *SplunkClient) GetClusterMasterInfo() (*ClusterMasterInfo, error) { apiResponse := struct { @@ -424,23 +424,23 @@ func (c *SplunkClient) GetClusterMasterInfo() (*ClusterMasterInfo, error) { } // IndexerClusterPeerInfo represents the status of a indexer cluster peer. -// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fslave.2Finfo +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fpeer.2Finfo type IndexerClusterPeerInfo struct { // Current bundle being used by this peer. ActiveBundle ClusterBundleInfo `json:"active_bundle"` - // Lists information about the most recent bundle downloaded from the master. + // Lists information about the most recent bundle downloaded from the manager. LatestBundle ClusterBundleInfo `json:"latest_bundle"` // The initial bundle generation ID recognized by this peer. Any searches from previous generations fail. - // The initial bundle generation ID is created when a peer first comes online, restarts, or recontacts the master. + // The initial bundle generation ID is created when a peer first comes online, restarts, or recontacts the manager. // Note that this is reported as a very large number (18446744073709552000) that breaks Go's JSON library, while the peer is being decommissioned. //BaseGenerationID uint64 `json:"base_generation_id"` - // Indicates if this peer is registered with the master in the cluster. + // Indicates if this peer is registered with the manager in the cluster. Registered bool `json:"is_registered"` - // Timestamp for the last attempt to contact the master. + // Timestamp for the last attempt to contact the manager. 
LastHeartbeatAttempt int64 `json:"last_heartbeat_attempt"` // Indicates whether the peer needs to be restarted to enable its cluster configuration. @@ -452,7 +452,7 @@ type IndexerClusterPeerInfo struct { // GetIndexerClusterPeerInfo queries info from a indexer cluster peer. // You can use this on any peer in an indexer cluster. -// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fslave.2Finfo +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fpeer.2Finfo func (c *SplunkClient) GetIndexerClusterPeerInfo() (*IndexerClusterPeerInfo, error) { apiResponse := struct { Entry []struct { @@ -470,7 +470,7 @@ func (c *SplunkClient) GetIndexerClusterPeerInfo() (*IndexerClusterPeerInfo, err return &apiResponse.Entry[0].Content, nil } -// ClusterMasterPeerInfo represents the status of a indexer cluster peer (cluster master endpoint). +// ClusterMasterPeerInfo represents the status of a indexer cluster peer (cluster manager endpoint). // See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Fpeers type ClusterMasterPeerInfo struct { // Unique identifier or GUID for the peer @@ -479,11 +479,11 @@ type ClusterMasterPeerInfo struct { // The name for the peer. Displayed on the manager page. Label string `json:"label"` - // The ID of the configuration bundle currently being used by the master. + // The ID of the configuration bundle currently being used by the manager. ActiveBundleID string `json:"active_bundle_id"` // The initial bundle generation ID recognized by this peer. Any searches from previous generations fail. - // The initial bundle generation ID is created when a peer first comes online, restarts, or recontacts the master. + // The initial bundle generation ID is created when a peer first comes online, restarts, or recontacts the manager. 
// Note that this is reported as a very large number (18446744073709552000) that breaks Go's JSON library, while the peer is being decommissioned. //BaseGenerationID uint64 `json:"base_generation_id"` @@ -509,7 +509,7 @@ type ClusterMasterPeerInfo struct { // The ID of the configuration bundle this peer is using. LatestBundleID string `json:"latest_bundle_id"` - // Used by the master to keep track of pending jobs requested by the master to this peer. + // Used by the manager to keep track of pending jobs requested by the manager to this peer. PendingJobCount int `json:"pending_job_count"` // Number of buckets for which the peer is primary in its local site, or the number of buckets that return search results from same site as the peer. @@ -572,8 +572,8 @@ type ClusterMasterPeerInfo struct { } `json:"status_counter"` } -// GetClusterMasterPeers queries the cluster master for info about indexer cluster peers. -// You can only use this on a cluster master. +// GetClusterMasterPeers queries the cluster manager for info about indexer cluster peers. +// You can only use this on a cluster manager. // See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Fpeers func (c *SplunkClient) GetClusterMasterPeers() (map[string]ClusterMasterPeerInfo, error) { apiResponse := struct { @@ -598,10 +598,10 @@ func (c *SplunkClient) GetClusterMasterPeers() (map[string]ClusterMasterPeerInfo } // RemoveIndexerClusterPeer removes peer from an indexer cluster, where id=unique GUID for the peer. -// You can only use this on a cluster master. +// You can only use this on a cluster manager. 
// See https://docs.splunk.com/Documentation/Splunk/8.0.2/Indexer/Removepeerfrommasterlist func (c *SplunkClient) RemoveIndexerClusterPeer(id string) error { - // sent request to remove a peer from Cluster Master peers list + // send request to remove a peer from Cluster Manager peers list endpoint := fmt.Sprintf("%s/services/cluster/master/control/control/remove_peers?peers=%s", c.ManagementURI, id) request, err := http.NewRequest("POST", endpoint, nil) if err != nil { @@ -628,7 +628,7 @@ func (c *SplunkClient) DecommissionIndexerClusterPeer(enforceCounts bool) error return c.Do(request, expectedStatus, nil) } -// BundlePush pushes the CM master apps bundle to all the indexer peers +// BundlePush pushes the cluster manager apps bundle to all the indexer peers func (c *SplunkClient) BundlePush(ignoreIdenticalBundle bool) error { endpoint := fmt.Sprintf("%s/services/cluster/master/control/default/apply", c.ManagementURI) reqBody := fmt.Sprintf("&ignore_identical_bundle=%t", ignoreIdenticalBundle) @@ -919,7 +919,7 @@ func (c *SplunkClient) GetClusterInfo(mockCall bool) (*ClusterInfo, error) { } // SetIdxcSecret sets idxc_secret for a Splunk Instance -// Can be used on any peer in an indexer cluster as long as the idxc_secret matches the cluster master +// Can be used on any peer in an indexer cluster as long as the idxc_secret matches the cluster manager // See https://docs.splunk.com/Documentation/Splunk/7.0.0/RESTREF/RESTcluster#cluster.2Fconfig.2Fconfig func (c *SplunkClient) SetIdxcSecret(idxcSecret string) error { endpoint := fmt.Sprintf("%s/services/cluster/config/config?secret=%s", c.ManagementURI, idxcSecret) diff --git a/pkg/splunk/common/messages.go b/pkg/splunk/common/messages.go index 0654794d3..045d7ea63 100644 --- a/pkg/splunk/common/messages.go +++ b/pkg/splunk/common/messages.go @@ -30,6 +30,6 @@ const ( // SecretTokenNotRetrievable indicates missing secret token in pod secret SecretTokenNotRetrievable = "Couldn't retrieve %s from secret data" - // 
EmptyClusterMasterRef indicates an empty cluster master reference + // EmptyClusterMasterRef indicates an empty cluster manager reference EmptyClusterMasterRef = "Empty cluster master reference" ) diff --git a/pkg/splunk/enterprise/clustermaster.go b/pkg/splunk/enterprise/clustermaster.go index 71a2e4d9d..47612b1dd 100644 --- a/pkg/splunk/enterprise/clustermaster.go +++ b/pkg/splunk/enterprise/clustermaster.go @@ -31,7 +31,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ApplyClusterMaster reconciles the state of a Splunk Enterprise cluster master. +// ApplyClusterMaster reconciles the state of a Splunk Enterprise cluster manager. func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds @@ -122,13 +122,13 @@ func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterpriseApi.Clu return result, err } - // create or update a regular service for the cluster master + // create or update a regular service for the cluster manager err = splctrl.ApplyService(client, getSplunkService(cr, &cr.Spec.CommonSplunkSpec, SplunkClusterMaster, false)) if err != nil { return result, err } - // create or update statefulset for the cluster master + // create or update statefulset for the cluster manager statefulSet, err := getClusterMasterStatefulSet(client, cr) if err != nil { return result, err @@ -151,7 +151,7 @@ func ApplyClusterMaster(client splcommon.ControllerClient, cr *enterpriseApi.Clu return result, err } - // Master apps bundle push requires multiple reconcile iterations in order to reflect the configMap on the CM pod. + // Manager apps bundle push requires multiple reconcile iterations in order to reflect the configMap on the CM pod. 
// So keep PerformCmBundlePush() as the last call in this block of code, so that other functionalities are not blocked err = PerformCmBundlePush(client, cr) if err != nil { return result, err } @@ -190,7 +190,7 @@ func validateClusterMasterSpec(cr *enterpriseApi.ClusterMaster) error { return validateCommonSplunkSpec(&cr.Spec.CommonSplunkSpec) } -// getClusterMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license master. +// getClusterMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise cluster manager. func getClusterMasterStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) (*appsv1.StatefulSet, error) { var extraEnvVar []corev1.EnvVar @@ -237,7 +237,7 @@ func CheckIfsmartstoreConfigMapUpdatedToPod(c splcommon.ControllerClient, cr *en return fmt.Errorf("Smartstore ConfigMap is missing") } -// PerformCmBundlePush initiates the bundle push from cluster master +// PerformCmBundlePush initiates the bundle push from cluster manager func PerformCmBundlePush(c splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) error { if cr.Status.BundlePushTracker.NeedToPushMasterApps == false { return nil @@ -258,7 +258,7 @@ func PerformCmBundlePush(c splcommon.ControllerClient, cr *enterpriseApi.Cluster // The amount of time it takes for the configMap update to Pod depends on // how often the Kubelet on the K8 node refreshes its cache with API server. // From our tests, the Pod can take as high as 90 seconds. So keep checking - // for the configMap update to the Pod before proceeding for the master apps + // for the configMap update to the Pod before proceeding for the manager apps // bundle push. 
err := CheckIfsmartstoreConfigMapUpdatedToPod(c, cr) @@ -275,7 +275,7 @@ func PerformCmBundlePush(c splcommon.ControllerClient, cr *enterpriseApi.Cluster return err } -// PushMasterAppsBundle issues the REST command to for cluster master bundle push +// PushMasterAppsBundle issues the REST command for the cluster manager bundle push func PushMasterAppsBundle(c splcommon.ControllerClient, cr *enterpriseApi.ClusterMaster) error { scopedLog := log.WithName("PushMasterApps").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/configuration.go b/pkg/splunk/enterprise/configuration.go index 0a509cd06..d6b82f9a6 100644 --- a/pkg/splunk/enterprise/configuration.go +++ b/pkg/splunk/enterprise/configuration.go @@ -38,7 +38,7 @@ var logC = logf.Log.WithName("splunk.enterprise.configValidation") // getSplunkLabels returns a map of labels to use for Splunk Enterprise components. func getSplunkLabels(instanceIdentifier string, instanceType InstanceType, partOfIdentifier string) map[string]string { - // For multisite / multipart IndexerCluster, the name of the part containing the cluster-master is used + // For multisite / multipart IndexerCluster, the name of the part containing the cluster-manager is used // to set the label app.kubernetes.io/part-of on all the parts so that its indexer service can select // the indexers from all the parts. Otherwise partOfIdentifier is equal to instanceIdentifier. 
if instanceType != SplunkIndexer || len(partOfIdentifier) == 0 { @@ -133,7 +133,7 @@ func getSplunkService(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkS partOfIdentifier = instanceIdentifier instanceIdentifier = "" } else { - // And for child parts of multisite / multipart IndexerCluster, use the name of the part containing the cluster-master + // And for child parts of multisite / multipart IndexerCluster, use the name of the part containing the cluster-manager // in the app.kubernetes.io/part-of label partOfIdentifier = spec.ClusterMasterRef.Name } @@ -573,7 +573,7 @@ func updateSplunkPodTemplateWithConfig(client splcommon.ControllerClient, podTem }) // 1. For Indexer cluster case, do not set the annotation on CM pod. smartstore config is - // propagated through the CM master apps bundle push + // propagated through the CM manager apps bundle push // 2. In case of Standalone, reset the Pod, by updating the latest Resource version of the // smartstore config map. if instanceType == SplunkStandalone { @@ -677,10 +677,10 @@ func updateSplunkPodTemplateWithConfig(client splcommon.ControllerClient, podTem }) } - // append URL for cluster master, if configured + // append URL for cluster manager, if configured var clusterMasterURL string if instanceType == SplunkClusterMaster { - // This makes splunk-ansible configure indexer-discovery on cluster-master + // This makes splunk-ansible configure indexer-discovery on cluster-manager clusterMasterURL = "localhost" } else if spec.ClusterMasterRef.Name != "" { clusterMasterURL = GetSplunkServiceName(SplunkClusterMaster, spec.ClusterMasterRef.Name, false) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 81502115b..704d0f8d3 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -203,7 +203,7 @@ func SetClusterMaintenanceMode(c splcommon.ControllerClient, cr *enterpriseApi.I } } - // Set cluster master maintenance 
mode + // Set cluster manager maintenance mode if enable { cr.Status.MaintenanceMode = true } else { @@ -310,7 +310,7 @@ func ApplyIdxcSecret(mgr *indexerClusterPodManager, replicas int32, mock bool) e During the recycle of indexer pods due to an idxc secret change, if there is a container restart(for example if the splunkd process dies) before the operator deletes the pod, the container restart fails due to mismatch of idxc password between Cluster - master and that particular indexer. + manager and that particular indexer. Changing the idxc passwords on the secrets mounted on the indexer pods to avoid the above. */ @@ -466,7 +466,7 @@ func (mgr *indexerClusterPodManager) getClient(n int32) *splclient.SplunkClient return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPwd) } -// getClusterMasterClient for indexerClusterPodManager returns a SplunkClient for cluster master +// getClusterMasterClient for indexerClusterPodManager returns a SplunkClient for cluster manager func (mgr *indexerClusterPodManager) getClusterMasterClient() *splclient.SplunkClient { scopedLog := log.WithName("indexerClusterPodManager.getClusterMasterClient").WithValues("name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace()) @@ -537,7 +537,7 @@ func (mgr *indexerClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSe return fmt.Errorf("Waiting for cluster master to become ready") } - // get indexer cluster info from cluster master if it's ready + // get indexer cluster info from cluster manager if it's ready c := mgr.getClusterMasterClient() clusterInfo, err := c.GetClusterMasterInfo() if err != nil { @@ -548,7 +548,7 @@ func (mgr *indexerClusterPodManager) updateStatus(statefulSet *appsv1.StatefulSe mgr.cr.Status.ServiceReady = clusterInfo.ServiceReady mgr.cr.Status.MaintenanceMode = clusterInfo.MaintenanceMode - // get peer information from cluster master + // get peer information from cluster manager peers, err := c.GetClusterMasterPeers() if err != 
nil { return err @@ -604,7 +604,7 @@ func validateIndexerClusterSpec(cr *enterpriseApi.IndexerCluster) error { return fmt.Errorf("IndexerCluster spec should refer to ClusterMaster via clusterMasterRef") } - // Multisite / multipart clusters: can't reference a cluster master located in another namespace because of Service and Secret limitations + // Multisite / multipart clusters: can't reference a cluster manager located in another namespace because of Service and Secret limitations if len(cr.Spec.ClusterMasterRef.Namespace) > 0 && cr.Spec.ClusterMasterRef.Namespace != cr.GetNamespace() { return fmt.Errorf("Multisite cluster does not support cluster master to be located in a different namespace") } diff --git a/pkg/splunk/enterprise/licensemaster.go b/pkg/splunk/enterprise/licensemaster.go index e70769ed8..8c5d54501 100644 --- a/pkg/splunk/enterprise/licensemaster.go +++ b/pkg/splunk/enterprise/licensemaster.go @@ -28,7 +28,7 @@ import ( splctrl "github.com/splunk/splunk-operator/pkg/splunk/controller" ) -// ApplyLicenseMaster reconciles the state for the Splunk Enterprise license master. +// ApplyLicenseMaster reconciles the state for the Splunk Enterprise license manager. func ApplyLicenseMaster(client splcommon.ControllerClient, cr *enterpriseApi.LicenseMaster) (reconcile.Result, error) { // unless modified, reconcile for this object will be requeued after 5 seconds @@ -123,7 +123,7 @@ func ApplyLicenseMaster(client splcommon.ControllerClient, cr *enterpriseApi.Lic return result, nil } -// getLicenseMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license master. +// getLicenseMasterStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license manager. 
func getLicenseMasterStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.LicenseMaster) (*appsv1.StatefulSet, error) { ss, err := getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkLicenseMaster, 1, []corev1.EnvVar{}) if err != nil { diff --git a/pkg/splunk/enterprise/licensemaster_test.go b/pkg/splunk/enterprise/licensemaster_test.go index edfa0c212..29d93d459 100644 --- a/pkg/splunk/enterprise/licensemaster_test.go +++ b/pkg/splunk/enterprise/licensemaster_test.go @@ -108,7 +108,7 @@ func TestGetLicenseMasterStatefulSet(t *testing.T) { cr.Spec.LicenseURL = "/mnt/splunk.lic" test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK
_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.k
ubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) - // Allow installing apps via DefaultsURLApps for Licence Master + // Allow installing apps via DefaultsURLApps for Licence Manager cr.Spec.DefaultsURLApps = "/mnt/apps/apps.yml" test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-stack1-license-master","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"","name":"stack1","uid":"","controller":true}]},"spec":{"replicas":1,"selector":{"matchLabels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000"}},"spec":{"volumes":[{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-stack1-license-master-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"}],"env":[{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-li
cense"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/apps/apps.yml,/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_license_master"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_LICENSE_URI","value":"/mnt/splunk.lic"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/sbin/checkstate.sh"]},"initialDelaySeconds":300,"timeoutSeconds":30,"periodSeconds":30},"readinessProbe":{"exec":{"command":["/bin/grep","started","/opt/container_artifact/splunk-container.state"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5},"imagePullPolicy":"IfNotPresent"}],"securityContext":{"runAsUser":41812,"fsGroup":41812},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-stack1-license-master"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"license-master","app.kubernetes.io/instance":"splunk-stack1-license-master","app.kubernetes.io/m
anaged-by":"splunk-operator","app.kubernetes.io/name":"license-master","app.kubernetes.io/part-of":"splunk-stack1-license-master"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-stack1-license-master-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0}}`) diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index d98dabb3d..4fa9d8ea5 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -72,7 +72,7 @@ func ApplyMonitoringConsole(client splcommon.ControllerClient, cr splcommon.Meta addNewURLs = false } - //get cluster info from cluster master + //get cluster info from cluster manager if cr.GetObjectKind().GroupVersionKind().Kind == "ClusterMaster" && !spec.Mock { mgr := monitoringConsolePodManager{cr: &cr, spec: &spec, secrets: secrets, newSplunkClient: splclient.NewSplunkClient} c := mgr.getClusterMasterClient(cr) @@ -115,7 +115,7 @@ func (mgr *monitoringConsolePodManager) getMonitoringConsoleClient(cr splcommon. 
return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(mgr.secrets.Data["password"])) } -// getClusterMasterClient for monitoringConsolePodManager returns a SplunkClient for cluster master +// getClusterMasterClient for monitoringConsolePodManager returns a SplunkClient for cluster manager func (mgr *monitoringConsolePodManager) getClusterMasterClient(cr splcommon.MetaObject) *splclient.SplunkClient { fqdnName := splcommon.GetServiceFQDN(cr.GetNamespace(), GetSplunkServiceName(SplunkClusterMaster, cr.GetName(), false)) return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(mgr.secrets.Data["password"])) @@ -208,7 +208,7 @@ func getMonitoringConsoleStatefulSet(client splcommon.ControllerClient, cr splco }, //Below requests/limits for MC are defined taking into account below EC2 validated architecture and its defined limits //1. https://www.splunk.com/pdfs/technical-briefs/deploying-splunk-enterprise-on-amazon-web-services-technical-brief.pdf - //defines the validate architecture for License Master and Monitoring console i.e, c5.2xlarge + //defines the validated architecture for License Manager and Monitoring console i.e, c5.2xlarge //2. (c5.2xlarge) architecture req from https://aws.amazon.com/ec2/instance-types/c5/ //defines that for c5.2xlarge architecture we need 8vCPU and 16Gi memory //since we only have MC here (as we have separate LM) so 4vCPU and 8Gi memory has been set as limit for MC pod diff --git a/pkg/splunk/enterprise/searchheadcluster.go b/pkg/splunk/enterprise/searchheadcluster.go index e2881c5c9..f8e8faba0 100644 --- a/pkg/splunk/enterprise/searchheadcluster.go +++ b/pkg/splunk/enterprise/searchheadcluster.go @@ -529,7 +529,7 @@ func getSearchHeadStatefulSet(client splcommon.ControllerClient, cr *enterpriseA return ss, nil } -// getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise license master. 
+// getDeployerStatefulSet returns a Kubernetes StatefulSet object for a Splunk Enterprise deployer. func getDeployerStatefulSet(client splcommon.ControllerClient, cr *enterpriseApi.SearchHeadCluster) (*appsv1.StatefulSet, error) { ss, err := getSplunkStatefulSet(client, cr, &cr.Spec.CommonSplunkSpec, SplunkDeployer, 1, getSearchHeadExtraEnv(cr, cr.Spec.Replicas)) if err != nil { diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 3ba167f0f..8ca2afc17 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -21,7 +21,7 @@ const ( // SplunkStandalone is a single instance of Splunk Enterprise SplunkStandalone InstanceType = "standalone" - // SplunkClusterMaster is the master node of an indexer cluster, see https://docs.splunk.com/Documentation/Splunk/latest/Indexer/Basicclusterarchitecture + // SplunkClusterMaster is the manager node of an indexer cluster, see https://docs.splunk.com/Documentation/Splunk/latest/Indexer/Basicclusterarchitecture SplunkClusterMaster InstanceType = "cluster-master" // SplunkSearchHead may be a standalone or clustered search head instance @@ -33,7 +33,7 @@ const ( // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" - // SplunkLicenseMaster controls one or more license slaves + // SplunkLicenseMaster controls one or more license nodes SplunkLicenseMaster InstanceType = "license-master" // SplunkMonitoringConsole is a single instance of Splunk monitor for mc @@ -67,7 +67,7 @@ func (instanceType InstanceType) ToRole() string { return role } -// ToKind returns master InstanceType for CRD that manages a given InstanceType +// ToKind returns manager InstanceType for CRD that manages a given InstanceType func (instanceType InstanceType) ToKind() string { var kind string switch instanceType { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 
743e67ad9..f795334de 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -151,7 +151,7 @@ func getStandaloneExtraEnv(cr splcommon.MetaObject, replicas int32) []corev1.Env } } -// getLicenseMasterURL returns URL of license master +// getLicenseMasterURL returns URL of license manager func getLicenseMasterURL(cr splcommon.MetaObject, spec *enterpriseApi.CommonSplunkSpec) []corev1.EnvVar { if spec.LicenseMasterRef.Name != "" { licenseMasterURL := GetSplunkServiceName(SplunkLicenseMaster, spec.LicenseMasterRef.Name, false) diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 501e9eea1..58e64f035 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -65,7 +65,7 @@ func TestApplySplunkConfig(t *testing.T) { searchHeadRevised.Spec.ClusterMasterRef.Name = "stack2" spltest.ReconcileTesterWithoutRedundantCheck(t, "TestApplySplunkConfig", &searchHeadCR, searchHeadRevised, createCalls, updateCalls, reconcile, false) - // test indexer with license master + // test indexer with license manager indexerCR := enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", @@ -281,7 +281,7 @@ func TestApplyAppListingConfigMap(t *testing.T) { cr.Kind = "SearchHeadCluster" testAppListingConfigMap(client, &cr, &cr.Spec.AppFrameworkConfig, cr.Status.AppContext.AppsSrcDeployStatus, `{"metadata":{"name":"splunk-example-searchheadcluster-app-list","namespace":"test","creationTimestamp":null,"ownerReferences":[{"apiVersion":"","kind":"SearchHeadCluster","name":"example","uid":"","controller":true}]},"data":{"app-list-cluster-with-pre-config.yaml":"splunk:\n apps_location:\n - \"/init-apps/appsWithPreConfigRequired/1_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/2_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/3_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/4_appWithPreConfigReqOne.tgz\"\n - 
\"/init-apps/appsWithPreConfigRequired/5_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/6_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/7_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/8_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/9_appWithPreConfigReqOne.tgz\"\n - \"/init-apps/appsWithPreConfigRequired/10_appWithPreConfigReqOne.tgz\"","app-list-cluster.yaml":"splunk:\n app_paths_install:\n shc:\n - \"/init-apps/securityApps/1_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/2_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/3_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/4_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/5_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/6_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/7_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/8_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/9_securityCategoryOne.tgz\"\n - \"/init-apps/securityApps/10_securityCategoryOne.tgz\"","app-list-local.yaml":"splunk:\n app_paths_install:\n default:\n - \"/init-apps/adminApps/1_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/2_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/3_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/4_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/5_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/6_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/7_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/8_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/9_adminCategoryOne.tgz\"\n - \"/init-apps/adminApps/10_adminCategoryOne.tgz\"","appsUpdateToken":"1601945361"}}`) - // Now test the Cluster master stateful set, to validate the Pod updates with the app listing config map + // Now test the Cluster manager stateful set, to validate the Pod updates with the app listing config map cr.Kind = "ClusterMaster" _, err = 
splutil.ApplyNamespaceScopedSecretObject(client, "test") if err != nil { diff --git a/test/README.md b/test/README.md index 2fe2d0d1b..87161e517 100644 --- a/test/README.md +++ b/test/README.md @@ -66,7 +66,7 @@ Note: To run a specific test, you can ### Circleci pipeline -The circleci config.xml file will also run the integration tests when merging to master branch. By default, the pipeline workflow will +The circleci config.xml file will also run the integration tests when merging to master branch. By default, the pipeline workflow will deploy a KIND cluster and run the tests against it. To run the test againsts the EKS cluster, you will need to define the following project environment variables in the circleci console AWS_ACCESS_KEY_ID diff --git a/test/c3/appframework/appframework_test.go b/test/c3/appframework/appframework_test.go index e295f2e96..9abfd2cee 100644 --- a/test/c3/appframework/appframework_test.go +++ b/test/c3/appframework/appframework_test.go @@ -91,7 +91,7 @@ var _ = Describe("c3appfw test", func() { err := deployment.DeploySingleSiteClusterWithGivenAppFrameworkSpec(deployment.GetName(), indexerReplicas, true, appFrameworkSpec, 10) Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster with App framework") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase @@ -145,7 +145,7 @@ var _ = Describe("c3appfw test", func() { // Wait for the poll period for the apps to be downloaded time.Sleep(2 * time.Minute) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase @@ -217,7 +217,7 @@ var _ = Describe("c3appfw test", func() { // Ensure Indexer cluster go to Ready phase testenv.SingleSiteIndexersReady(deployment, testenvInstance) - 
Verify New Indexer On Cluster Master + // Verify New Indexer On Cluster Manager indexerName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), scaledIndexerReplicas-1) testenvInstance.Log.Info("Checking for New Indexer On Cluster Master", "Indexer Name", indexerName) Expect(testenv.CheckIndexerOnCM(deployment, indexerName)).To(Equal(true)) @@ -259,7 +259,7 @@ var _ = Describe("c3appfw test", func() { // Wait for the poll period for the apps to be downloaded time.Sleep(2 * time.Minute) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase diff --git a/test/custom_resource_crud/custom_resource_crud_c3_test.go b/test/custom_resource_crud/custom_resource_crud_c3_test.go index 174a5dba9..796f96163 100644 --- a/test/custom_resource_crud/custom_resource_crud_c3_test.go +++ b/test/custom_resource_crud/custom_resource_crud_c3_test.go @@ -59,7 +59,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { err := deployment.DeploySingleSiteCluster(deployment.GetName(), 3, true /*shc*/) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the Cluster Master goes to Ready phase + // Ensure that the Cluster Manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure Indexers go to Ready phase @@ -145,7 +145,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { err := deployment.DeploySingleSiteCluster(deployment.GetName(), 3, true /*shc*/) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the Cluster Master goes to Ready phase + // Ensure that the Cluster Manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure Indexers go to Ready phase @@ -169,7 +169,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { // Verify Indexers PVCs (etc and var) exists testenv.VerifyPVCsPerDeployment(deployment, testenvInstance, 
"idxc-indexer", 3, true, verificationTimeout) - // Verify Cluster Master PVCs (etc and var) exists + // Verify Cluster Manager PVCs (etc and var) exists testenv.VerifyPVCsPerDeployment(deployment, testenvInstance, "cluster-master", 1, true, verificationTimeout) // Delete the Search Head Cluster @@ -184,7 +184,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { err = deployment.DeleteCR(idxc) Expect(err).To(Succeed(), "Unable to delete IDXC instance", "IDXC Name", idxc) - // Delete the Cluster Master + // Delete the Cluster Manager cm := &enterpriseApi.ClusterMaster{} deployment.GetInstance(deployment.GetName(), cm) err = deployment.DeleteCR(cm) @@ -199,7 +199,7 @@ var _ = Describe("Crcrud test for SVA C3", func() { // Verify Indexers PVCs (etc and var) have been deleted testenv.VerifyPVCsPerDeployment(deployment, testenvInstance, "idxc-indexer", 3, false, verificationTimeout) - // Verify Cluster Master PVCs (etc and var) have been deleted + // Verify Cluster Manager PVCs (etc and var) have been deleted testenv.VerifyPVCsPerDeployment(deployment, testenvInstance, "cluster-master", 1, false, verificationTimeout) }) }) diff --git a/test/custom_resource_crud/custom_resource_crud_m4_test.go b/test/custom_resource_crud/custom_resource_crud_m4_test.go index 79435b74f..2e08e5556 100644 --- a/test/custom_resource_crud/custom_resource_crud_m4_test.go +++ b/test/custom_resource_crud/custom_resource_crud_m4_test.go @@ -57,7 +57,7 @@ var _ = Describe("Crcrud test for SVA M4", func() { err := deployment.DeployMultisiteClusterWithSearchHead(deployment.GetName(), 1, siteCount) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase diff --git a/test/delete_cr/deletecr_test.go b/test/delete_cr/deletecr_test.go index 12ab2a681..0002b793d 100644 --- 
a/test/delete_cr/deletecr_test.go +++ b/test/delete_cr/deletecr_test.go @@ -57,7 +57,7 @@ var _ = Describe("DeleteCR test", func() { err := deployment.DeploySingleSiteCluster(deployment.GetName(), 3, true /*shc*/) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase diff --git a/test/env.sh b/test/env.sh index 7b1668b4e..c8e7d9d6f 100644 --- a/test/env.sh +++ b/test/env.sh @@ -10,7 +10,7 @@ : "${ECR_REGISTRY:=}" : "${VPC_PUBLIC_SUBNET_STRING:=}" : "${VPC_PRIVATE_SUBNET_STRING:=}" -# Below env variables required to run license master test cases +# Below env variables required to run license manager test cases : "${ENTERPRISE_LICENSE_PATH:=}" : "${TEST_S3_BUCKET:=}" # Below env variables requried to run remote indexes test cases diff --git a/test/licensemaster/lm_c3_test.go b/test/licensemaster/lm_c3_test.go index 39e431c96..718b5876b 100644 --- a/test/licensemaster/lm_c3_test.go +++ b/test/licensemaster/lm_c3_test.go @@ -61,7 +61,7 @@ var _ = Describe("Licensemaster test", func() { err = deployment.DeploySingleSiteCluster(deployment.GetName(), 3, true /*shc*/) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase diff --git a/test/licensemaster/lm_m4_test.go b/test/licensemaster/lm_m4_test.go index 326ecca2f..a3e9fcf86 100644 --- a/test/licensemaster/lm_m4_test.go +++ b/test/licensemaster/lm_m4_test.go @@ -56,7 +56,7 @@ var _ = Describe("Licensemaster test", func() { err = deployment.DeployMultisiteClusterWithSearchHead(deployment.GetName(), 1, siteCount) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master 
goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase diff --git a/test/licensemaster/lm_s1_test.go b/test/licensemaster/lm_s1_test.go index 51490e842..672995296 100644 --- a/test/licensemaster/lm_s1_test.go +++ b/test/licensemaster/lm_s1_test.go @@ -52,11 +52,11 @@ var _ = Describe("Licensemaster test", func() { // Create License Config Map testenvInstance.CreateLicenseConfigMap(licenseFilePath) - // Create standalone Deployment with License Master + // Create standalone Deployment with License Manager standalone, err := deployment.DeployStandaloneWithLM(deployment.GetName()) Expect(err).To(Succeed(), "Unable to deploy standalone instance with LM") - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) // Wait for Standalone to be in READY status diff --git a/test/m4/appframework/appframework_test.go b/test/m4/appframework/appframework_test.go index 7336c171d..9ecd0050c 100644 --- a/test/m4/appframework/appframework_test.go +++ b/test/m4/appframework/appframework_test.go @@ -92,7 +92,7 @@ var _ = Describe("m4appfw test", func() { err := deployment.DeployMultisiteClusterWithSearchHeadAndAppFramework(deployment.GetName(), indexersPerSite, siteCount, appFrameworkSpec, true, 10) Expect(err).To(Succeed(), "Unable to deploy Multi Site Indexer Cluster with App framework") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase @@ -146,7 +146,7 @@ var _ = Describe("m4appfw test", func() { // Wait for the poll period for the apps to be downloaded time.Sleep(2 * time.Minute) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to 
Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase @@ -192,7 +192,7 @@ var _ = Describe("m4appfw test", func() { // Wait for the poll period for the apps to be downloaded time.Sleep(2 * time.Minute) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase diff --git a/test/monitoring_console/monitoring_console_test.go b/test/monitoring_console/monitoring_console_test.go index 34674533f..f5b706aac 100644 --- a/test/monitoring_console/monitoring_console_test.go +++ b/test/monitoring_console/monitoring_console_test.go @@ -210,7 +210,7 @@ var _ = Describe("Monitoring Console test", func() { err := deployment.DeploySingleSiteCluster(deployment.GetName(), defaultIndexerReplicas, true) Expect(err).To(Succeed(), "Unable to deploy search head cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase diff --git a/test/scaling_test/scaling_test.go b/test/scaling_test/scaling_test.go index ffd2a8881..4b494a818 100644 --- a/test/scaling_test/scaling_test.go +++ b/test/scaling_test/scaling_test.go @@ -117,7 +117,7 @@ var _ = Describe("Scaling test", func() { err := deployment.DeploySingleSiteCluster(deployment.GetName(), defaultIndexerReplicas, true) Expect(err).To(Succeed(), "Unable to deploy search head cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase @@ -168,7 +168,7 @@ var _ = Describe("Scaling test", func() { // Ensure Indexer cluster go to Ready phase testenv.SingleSiteIndexersReady(deployment, testenvInstance) - // 
Verify New Indexer On Cluster Master + // Verify New Indexer On Cluster Manager indexerName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), scaledIndexerReplicas-1) testenvInstance.Log.Info("Checking for Indexer On CM", "Indexer Name", indexerName) Expect(testenv.CheckIndexerOnCM(deployment, indexerName)).To(Equal(true)) @@ -188,7 +188,7 @@ var _ = Describe("Scaling test", func() { // Wait for Monitoring Console Pod to be in READY status testenv.MCPodReady(testenvInstance.GetName(), deployment) - // Verify New SearchHead is added to Cluster Master + // Verify New SearchHead is added to Cluster Manager searchHeadName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), scaledSHReplicas-1) testenvInstance.Log.Info("Checking for Search Head On CM", "Search Head Name", searchHeadName) Expect(testenv.CheckSearchHeadOnCM(deployment, searchHeadName)).To(Equal(true)) @@ -270,7 +270,7 @@ var _ = Describe("Scaling test", func() { err := deployment.DeployMultisiteClusterWithSearchHead(deployment.GetName(), defaultIndexerReplicas, siteCount) Expect(err).To(Succeed(), "Unable to deploy search head cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase @@ -320,7 +320,7 @@ var _ = Describe("Scaling test", func() { testenv.CreateMockLogfile(logFile, 2000) testenv.IngestFileViaMonitor(logFile, "main", podName, deployment) - // Verify New Indexer On Cluster Master + // Verify New Indexer On Cluster Manager indexerName := podName testenvInstance.Log.Info("Checking for Indexer On CM", "Indexer Name", indexerName) Expect(testenv.CheckIndexerOnCM(deployment, indexerName)).To(Equal(true)) diff --git a/test/secret/secret_c3_test.go b/test/secret/secret_c3_test.go index 1b9d826d7..3dc810c9a 100644 --- a/test/secret/secret_c3_test.go +++ b/test/secret/secret_c3_test.go @@ -63,10 +63,10 @@ var _ = Describe("Secret 
Test for SVA C3", func() { err = deployment.DeploySingleSiteCluster(deployment.GetName(), 3, true) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase @@ -95,13 +95,13 @@ var _ = Describe("Secret Test for SVA C3", func() { err = testenv.ModifySecretObject(deployment, testenvInstance.GetName(), namespaceScopedSecretName, updatedSecretData) Expect(err).To(Succeed(), "Unable to update secret Object") - // Ensure that Cluster Master goes to update phase + // Ensure that Cluster Manager goes to update phase testenv.VerifyClusterMasterPhase(deployment, testenvInstance, splcommon.PhaseUpdating) - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase diff --git a/test/secret/secret_m4_test.go b/test/secret/secret_m4_test.go index 265740684..4ae0b3dfc 100644 --- a/test/secret/secret_m4_test.go +++ b/test/secret/secret_m4_test.go @@ -64,10 +64,10 @@ var _ = Describe("Secret Test for M4 SVA", func() { err = deployment.DeployMultisiteClusterWithSearchHead(deployment.GetName(), 1, siteCount) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase 
testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase @@ -103,13 +103,13 @@ var _ = Describe("Secret Test for M4 SVA", func() { err = testenv.ModifySecretObject(deployment, testenvInstance.GetName(), namespaceScopedSecretName, updatedSecretData) Expect(err).To(Succeed(), "Unable to update secret Object") - // Ensure that Cluster Master goes to update phase + // Ensure that Cluster Manager goes to update phase testenv.VerifyClusterMasterPhase(deployment, testenvInstance, splcommon.PhaseUpdating) - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase diff --git a/test/secret/secret_s1_test.go b/test/secret/secret_s1_test.go index dbeb86be2..f65817995 100644 --- a/test/secret/secret_s1_test.go +++ b/test/secret/secret_s1_test.go @@ -60,11 +60,11 @@ var _ = Describe("Secret Test for SVA S1", func() { // Create License Config Map testenvInstance.CreateLicenseConfigMap(licenseFilePath) - // Create standalone Deployment with License Master + // Create standalone Deployment with License Manager standalone, err := deployment.DeployStandaloneWithLM(deployment.GetName()) Expect(err).To(Succeed(), "Unable to deploy standalone instance with LM") - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) // Wait for Standalone to be in READY status @@ -90,7 +90,7 @@ var _ = Describe("Secret Test for SVA S1", func() { // Ensure standalone is updating testenv.VerifyStandalonePhase(deployment, testenvInstance, deployment.GetName(), splcommon.PhaseUpdating) - // Wait for License Master to be in READY status + 
// Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) // Wait for Standalone to be in READY status @@ -140,11 +140,11 @@ var _ = Describe("Secret Test for SVA S1", func() { // Create License Config Map testenvInstance.CreateLicenseConfigMap(licenseFilePath) - // Create standalone Deployment with License Master + // Create standalone Deployment with License Manager standalone, err := deployment.DeployStandaloneWithLM(deployment.GetName()) Expect(err).To(Succeed(), "Unable to deploy standalone instance with LM") - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) // Wait for Standalone to be in READY status @@ -166,7 +166,7 @@ var _ = Describe("Secret Test for SVA S1", func() { // Ensure standalone is updating testenv.VerifyStandalonePhase(deployment, testenvInstance, deployment.GetName(), splcommon.PhaseUpdating) - // Wait for License Master to be in READY status + // Wait for License Manager to be in READY status testenv.LicenseMasterReady(deployment, testenvInstance) // Wait for Standalone to be in READY status @@ -208,7 +208,7 @@ var _ = Describe("Secret Test for SVA S1", func() { 4. Verify New Secrets are present in server.conf (Pass4SymmKey) 5. 
Verify New Secrets via api access (password)*/ - // Create standalone Deployment with License Master + // Create standalone Deployment with License Manager standalone, err := deployment.DeployStandalone(deployment.GetName()) Expect(err).To(Succeed(), "Unable to deploy standalone instance with LM") diff --git a/test/smartstore/smartstore_test.go b/test/smartstore/smartstore_test.go index 9ae049e07..da3192537 100644 --- a/test/smartstore/smartstore_test.go +++ b/test/smartstore/smartstore_test.go @@ -162,7 +162,7 @@ var _ = Describe("Smartstore test", func() { err := deployment.DeployMultisiteClusterWithSearchHeadAndIndexes(deployment.GetName(), 1, siteCount, testenvInstance.GetIndexSecretName(), smartStoreSpec) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase diff --git a/test/smoke/cluster_master_sites_response.go b/test/smoke/cluster_master_sites_response.go index d3bbed45d..7ce0c5c4e 100644 --- a/test/smoke/cluster_master_sites_response.go +++ b/test/smoke/cluster_master_sites_response.go @@ -14,7 +14,7 @@ package smoke -// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master +// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-manager // Endpoint: /services/cluster/master/sites type ClusterMasterSitesResponse struct { Entries []ClusterMasterSitesEntry `json:"entry"` diff --git a/test/smoke/smoke_test.go b/test/smoke/smoke_test.go index c98b899b7..ec34987bb 100644 --- a/test/smoke/smoke_test.go +++ b/test/smoke/smoke_test.go @@ -66,7 +66,7 @@ var _ = Describe("Smoke test", func() { err := deployment.DeploySingleSiteCluster(deployment.GetName(), 3, true /*shc*/) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to 
Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure indexers go to Ready phase @@ -90,7 +90,7 @@ var _ = Describe("Smoke test", func() { err := deployment.DeployMultisiteClusterWithSearchHead(deployment.GetName(), 1, siteCount) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase @@ -117,7 +117,7 @@ var _ = Describe("Smoke test", func() { err := deployment.DeployMultisiteCluster(deployment.GetName(), 1, siteCount) Expect(err).To(Succeed(), "Unable to deploy cluster") - // Ensure that the cluster-master goes to Ready phase + // Ensure that the cluster-manager goes to Ready phase testenv.ClusterMasterReady(deployment, testenvInstance) // Ensure the indexers of all sites go to Ready phase @@ -150,7 +150,7 @@ var _ = Describe("Smoke test", func() { }, } - // Create standalone Deployment with License Master + // Create standalone Deployment with License Manager standalone, err := deployment.DeployStandalonewithGivenSpec(deployment.GetName(), standaloneSpec) Expect(err).To(Succeed(), "Unable to deploy standalone instance with LM") diff --git a/test/testenv/appframework_utils.go b/test/testenv/appframework_utils.go index 3846fe95a..ae62d9f90 100644 --- a/test/testenv/appframework_utils.go +++ b/test/testenv/appframework_utils.go @@ -46,7 +46,7 @@ func GenerateAppSourceSpec(appSourceName string, appSourceLocation string, appSo // GetPodAppStatus Get the app install status and version number func GetPodAppStatus(deployment *Deployment, podName string, ns string, appname string, clusterWideInstall bool) (string, string, error) { - // For clusterwide install do not check for versions on deployer and cluster-master as the apps arent installed there + // For clusterwide 
install do not check for versions on deployer and cluster-manager as the apps arent installed there if clusterWideInstall && (strings.Contains(podName, "-cluster-master-") || strings.Contains(podName, "-deployer-")) { logf.Log.Info("Pod skipped as install is Cluter-wide", "PodName", podName) return "", "", nil diff --git a/test/testenv/cmutil.go b/test/testenv/cmutil.go index 687a11ec7..a03cfd01e 100644 --- a/test/testenv/cmutil.go +++ b/test/testenv/cmutil.go @@ -22,7 +22,7 @@ import ( "strings" ) -// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master +// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-manager // Endpoint: /services/cluster/master/sites type ClusterMasterSitesResponse struct { Entries []ClusterMasterSitesEntry `json:"entry"` @@ -44,7 +44,7 @@ type ClusterMasterSitesPeer struct { ServerName string `json:"server_name"` } -// ClusterMasterHealthResponse is a representation of the health response by a Splunk cluster-master +// ClusterMasterHealthResponse is a representation of the health response by a Splunk cluster-manager // Endpoint: /services/cluster/master/health type ClusterMasterHealthResponse struct { Entries []ClusterMasterHealthEntry `json:"entry"` @@ -115,7 +115,7 @@ type ClusterMasterPeersAndSearchHeadResponse struct { } `json:"entry"` } -// GetIndexersOrSearchHeadsOnCM get indexers or search head on Cluster Master +// GetIndexersOrSearchHeadsOnCM get indexers or search head on Cluster Manager func GetIndexersOrSearchHeadsOnCM(deployment *Deployment, endpoint string) ClusterMasterPeersAndSearchHeadResponse { url := "" if endpoint == "sh" { @@ -141,7 +141,7 @@ func GetIndexersOrSearchHeadsOnCM(deployment *Deployment, endpoint string) Clust return restResponse } -// CheckIndexerOnCM check given Indexer on cluster master +// CheckIndexerOnCM check given Indexer on cluster manager func CheckIndexerOnCM(deployment *Deployment, indexerName string) bool { 
restResponse := GetIndexersOrSearchHeadsOnCM(deployment, "peer") found := false @@ -155,7 +155,7 @@ func CheckIndexerOnCM(deployment *Deployment, indexerName string) bool { return found } -// CheckSearchHeadOnCM check given search head on cluster master +// CheckSearchHeadOnCM check given search head on cluster manager func CheckSearchHeadOnCM(deployment *Deployment, searchHeadName string) bool { restResponse := GetIndexersOrSearchHeadsOnCM(deployment, "sh") found := false diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 371ea7028..0be75f610 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -157,7 +157,7 @@ func (d *Deployment) PodExecCommand(podName string, cmd []string, stdin string, return stdout.String(), stderr.String(), nil } -//DeployLicenseMaster deploys the license master instance +//DeployLicenseMaster deploys the license manager instance func (d *Deployment) DeployLicenseMaster(name string) (*enterpriseApi.LicenseMaster, error) { if d.testenv.licenseFilePath == "" { @@ -172,7 +172,7 @@ func (d *Deployment) DeployLicenseMaster(name string) (*enterpriseApi.LicenseMas return deployed.(*enterpriseApi.LicenseMaster), err } -//DeployClusterMaster deploys the cluster master +//DeployClusterMaster deploys the cluster manager func (d *Deployment) DeployClusterMaster(name, licenseMasterName string, ansibleConfig string) (*enterpriseApi.ClusterMaster, error) { d.testenv.Log.Info("Deploying cluster-master", "name", name) cm := newClusterMaster(name, d.testenv.namespace, licenseMasterName, ansibleConfig) @@ -183,7 +183,7 @@ func (d *Deployment) DeployClusterMaster(name, licenseMasterName string, ansible return deployed.(*enterpriseApi.ClusterMaster), err } -//DeployClusterMasterWithSmartStoreIndexes deploys the cluster master with smartstore indexes +//DeployClusterMasterWithSmartStoreIndexes deploys the cluster manager with smartstore indexes func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(name, 
licenseMasterName string, ansibleConfig string, smartstorespec enterpriseApi.SmartStoreSpec) (*enterpriseApi.ClusterMaster, error) { d.testenv.Log.Info("Deploying cluster-master", "name", name) cm := newClusterMasterWithGivenIndexes(name, d.testenv.namespace, licenseMasterName, ansibleConfig, smartstorespec) @@ -282,9 +282,9 @@ func (d *Deployment) DeploySingleSiteCluster(name string, indexerReplicas int, s var licenseMaster string - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return err @@ -293,7 +293,7 @@ func (d *Deployment) DeploySingleSiteCluster(name string, indexerReplicas int, s licenseMaster = name } - // Deploy the cluster master + // Deploy the cluster manager _, err := d.DeployClusterMaster(name, licenseMaster, "") if err != nil { return err @@ -316,14 +316,14 @@ func (d *Deployment) DeploySingleSiteCluster(name string, indexerReplicas int, s return nil } -// DeployMultisiteClusterWithSearchHead deploys a lm, cluster-master, indexers in multiple sites and SH clusters +// DeployMultisiteClusterWithSearchHead deploys a lm, cluster-manager, indexers in multiple sites and SH clusters func (d *Deployment) DeployMultisiteClusterWithSearchHead(name string, indexerReplicas int, siteCount int) error { var licenseMaster string - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return err @@ -332,7 +332,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(name string, indexerRe licenseMaster = name } - // Deploy the cluster-master + // Deploy the cluster-manager defaults := `splunk: multisite_master: localhost all_sites: 
site1,site2,site3 @@ -375,14 +375,14 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(name string, indexerRe return nil } -// DeployMultisiteCluster deploys a lm, cluster-master, indexers in multiple sites +// DeployMultisiteCluster deploys a lm, cluster-manager, indexers in multiple sites func (d *Deployment) DeployMultisiteCluster(name string, indexerReplicas int, siteCount int) error { var licenseMaster string - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return err @@ -391,7 +391,7 @@ func (d *Deployment) DeployMultisiteCluster(name string, indexerReplicas int, si licenseMaster = name } - // Deploy the cluster-master + // Deploy the cluster-manager defaults := `splunk: multisite_master: localhost all_sites: site1,site2,site3 @@ -425,13 +425,13 @@ func (d *Deployment) DeployMultisiteCluster(name string, indexerReplicas int, si return nil } -// DeployStandaloneWithLM deploys a standalone splunk enterprise instance with license master on the specified testenv +// DeployStandaloneWithLM deploys a standalone splunk enterprise instance with license manager on the specified testenv func (d *Deployment) DeployStandaloneWithLM(name string) (*enterpriseApi.Standalone, error) { var licenseMaster string - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return nil, err @@ -478,14 +478,14 @@ func (d *Deployment) DeployStandaloneWithGivenSmartStoreSpec(name string, smartS return deployed.(*enterpriseApi.Standalone), err } -// DeployMultisiteClusterWithSearchHeadAndIndexes deploys a lm, cluster-master, indexers in multiple sites and SH 
clusters +// DeployMultisiteClusterWithSearchHeadAndIndexes deploys a lm, cluster-manager, indexers in multiple sites and SH clusters func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(name string, indexerReplicas int, siteCount int, indexesSecret string, smartStoreSpec enterpriseApi.SmartStoreSpec) error { var licenseMaster string - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return err @@ -494,7 +494,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(name string, licenseMaster = name } - // Deploy the cluster-master + // Deploy the cluster-manager defaults := `splunk: multisite_master: localhost all_sites: site1,site2,site3 @@ -533,7 +533,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(name string, return err } -// DeployClusterMasterWithGivenSpec deploys the cluster master with given SPEC +// DeployClusterMasterWithGivenSpec deploys the cluster manager with given SPEC func (d *Deployment) DeployClusterMasterWithGivenSpec(name string, spec enterpriseApi.ClusterMasterSpec) (*enterpriseApi.ClusterMaster, error) { d.testenv.Log.Info("Deploying cluster-master", "name", name) cm := newClusterMasterWithGivenSpec(name, d.testenv.namespace, spec) @@ -552,7 +552,7 @@ func (d *Deployment) DeploySearchHeadClusterWithGivenSpec(name string, spec ente return deployed.(*enterpriseApi.SearchHeadCluster), err } -// DeployLicenseMasterWithGivenSpec deploys the license master with given SPEC +// DeployLicenseMasterWithGivenSpec deploys the license manager with given SPEC func (d *Deployment) DeployLicenseMasterWithGivenSpec(name string, spec enterpriseApi.LicenseMasterSpec) (*enterpriseApi.LicenseMaster, error) { d.testenv.Log.Info("Deploying license-master", "name", name) lm := 
newLicenseMasterWithGivenSpec(name, d.testenv.namespace, spec) @@ -568,9 +568,9 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(name strin licenseMaster := "" - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return err @@ -579,7 +579,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(name strin licenseMaster = name } - // Deploy the cluster master + // Deploy the cluster manager cmSpec := enterpriseApi.ClusterMasterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: splcommon.Spec{ @@ -633,14 +633,14 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(name strin return nil } -// DeployMultisiteClusterWithSearchHeadAndAppFramework deploys cluster-master, indexers in multiple sites (SHC LM Optional) with app framework spec +// DeployMultisiteClusterWithSearchHeadAndAppFramework deploys cluster-manager, indexers in multiple sites (SHC LM Optional) with app framework spec func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(name string, indexerReplicas int, siteCount int, appFrameworkSpec enterpriseApi.AppFrameworkSpec, shc bool, delaySeconds int) error { licenseMaster := "" - // If license file specified, deploy License Master + // If license file specified, deploy License Manager if d.testenv.licenseFilePath != "" { - // Deploy the license master + // Deploy the license manager _, err := d.DeployLicenseMaster(name) if err != nil { return err @@ -649,7 +649,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(name st licenseMaster = name } - // Deploy the cluster-master + // Deploy the cluster-manager defaults := `splunk: multisite_master: localhost all_sites: site1,site2,site3 @@ -663,7 +663,7 @@ func (d *Deployment) 
DeployMultisiteClusterWithSearchHeadAndAppFramework(name st replication_factor: 2 ` - // Cluster Master Spec + // Cluster Manager Spec cmSpec := enterpriseApi.ClusterMasterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Spec: splcommon.Spec{ diff --git a/test/testenv/util.go b/test/testenv/util.go index 96d92eadc..ecbf3d738 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -402,7 +402,7 @@ func newOperator(name, ns, account, operatorImageAndTag, splunkEnterpriseImageAn return &operator } -// newStandaloneWithLM creates and initializes CR for Standalone Kind with License Master +// newStandaloneWithLM creates and initializes CR for Standalone Kind with License Manager func newStandaloneWithLM(name, ns string, licenseMasterName string) *enterpriseApi.Standalone { new := enterpriseApi.Standalone{ @@ -586,7 +586,7 @@ func newSearchHeadClusterWithGivenSpec(name string, ns string, spec enterpriseAp return &new } -// newLicenseMasterWithGivenSpec create and initializes CR for License Master Kind with Given Spec +// newLicenseMasterWithGivenSpec create and initializes CR for License Manager Kind with Given Spec func newLicenseMasterWithGivenSpec(name, ns string, spec enterpriseApi.LicenseMasterSpec) *enterpriseApi.LicenseMaster { new := enterpriseApi.LicenseMaster{ TypeMeta: metav1.TypeMeta{ diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index 7753948e5..0940b29c5 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -140,9 +140,9 @@ func SingleSiteIndexersReady(deployment *Deployment, testenvInstance *TestEnv) { }, ConsistentDuration, ConsistentPollInterval).Should(gomega.Equal(splcommon.PhaseReady)) } -// ClusterMasterReady verify Cluster Master Instance is in ready status +// ClusterMasterReady verify Cluster Manager Instance is in ready status func ClusterMasterReady(deployment *Deployment, testenvInstance *TestEnv) { - // Ensure that the cluster-master goes to Ready phase + 
// Ensure that the cluster-manager goes to Ready phase cm := &enterpriseApi.ClusterMaster{} gomega.Eventually(func() splcommon.Phase { err := deployment.GetInstance(deployment.GetName(), cm) @@ -155,7 +155,7 @@ func ClusterMasterReady(deployment *Deployment, testenvInstance *TestEnv) { return cm.Status.Phase }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(splcommon.PhaseReady)) - // In a steady state, cluster-master should stay in Ready and not flip-flop around + // In a steady state, cluster-manager should stay in Ready and not flip-flop around gomega.Consistently(func() splcommon.Phase { _ = deployment.GetInstance(deployment.GetName(), cm) return cm.Status.Phase @@ -229,7 +229,7 @@ func VerifyRFSFMet(deployment *Deployment, testenvInstance *TestEnv) { }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true)) } -// VerifyNoDisconnectedSHPresentOnCM is present on cluster master +// VerifyNoDisconnectedSHPresentOnCM is present on cluster manager func VerifyNoDisconnectedSHPresentOnCM(deployment *Deployment, testenvInstance *TestEnv) { gomega.Consistently(func() bool { shStatus := CheckSearchHeadRemoved(deployment) @@ -433,7 +433,7 @@ func VerifyCPULimits(deployment *Deployment, ns string, podName string, expected }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true)) } -// VerifyClusterMasterPhase verify phase of cluster master +// VerifyClusterMasterPhase verify phase of cluster manager func VerifyClusterMasterPhase(deployment *Deployment, testenvInstance *TestEnv, phase splcommon.Phase) { cm := &enterpriseApi.ClusterMaster{} gomega.Eventually(func() splcommon.Phase { @@ -608,7 +608,7 @@ func VerifyAppInstalled(deployment *Deployment, testenvInstance *TestEnv, ns str } if versionCheck { - // For clusterwide install do not check for versions on deployer and cluster-master as the apps arent installed there + // For clusterwide install do not check for versions on deployer and cluster-manager as the apps arent installed there if 
!(clusterWideInstall && (strings.Contains(podName, "-deployer-") || strings.Contains(podName, "-cluster-master-"))) { var expectedVersion string if checkupdated { From 1204d986c43d4a9ed3f4508f43952977d7c4d730 Mon Sep 17 00:00:00 2001 From: mgaldino Date: Thu, 9 Sep 2021 09:33:19 -0700 Subject: [PATCH 2/2] Addressing review comments --- docs/Examples.md | 4 ++-- pkg/apis/enterprise/v2/indexercluster_types.go | 2 +- pkg/splunk/client/enterprise.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/Examples.md b/docs/Examples.md index afb46b327..118389beb 100644 --- a/docs/Examples.md +++ b/docs/Examples.md @@ -460,7 +460,7 @@ spec: ### Example: Cluster Manager -In the ClusterMaster example, app3 and app4 are installed on any indexer instances that are managed by the cluster manager. App5 and app6 are installed locally on the ClusterMaster instance. +In this example, app3 and app4 are installed on any indexer instances that are managed by the cluster manager. App5 and app6 are installed locally on the ClusterMaster instance. ```yaml apiVersion: enterprise.splunk.com/v2 @@ -725,7 +725,7 @@ There are two ways to configure `pass4Symmkey` with an External LM: ``` - Setup the above decrypted plain-text [`pass4Symmkey`](PasswordManagement.md#pass4Symmkey) in the global secret object(Note: The `pass4Symmkey` would be stored in a base64 encoded format). 
For details see [updating global kubernetes secret object](#updating-global-kubernetes-secret-object) -### Configuring license_manager_url: +### Configuring license_master_url: Assuming that the hostname for your LM is `license-master.splunk.mydomain.com`, you should create a `default.yml` file with the following contents: diff --git a/pkg/apis/enterprise/v2/indexercluster_types.go b/pkg/apis/enterprise/v2/indexercluster_types.go index ca76199ce..6d154350f 100644 --- a/pkg/apis/enterprise/v2/indexercluster_types.go +++ b/pkg/apis/enterprise/v2/indexercluster_types.go @@ -106,7 +106,7 @@ type IndexerClusterStatus struct { // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector // +kubebuilder:resource:path=indexerclusters,scope=Namespaced,shortName=idc;idxc // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of indexer cluster" -// +kubebuilder:printcolumn:name="Manager",type="string",JSONPath=".status.clusterMasterPhase",description="Status of cluster manager" +// +kubebuilder:printcolumn:name="Master",type="string",JSONPath=".status.clusterMasterPhase",description="Status of cluster master" // +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".status.replicas",description="Desired number of indexer peers" // +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Current number of ready indexer peers" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of indexer cluster" diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go index b145784ca..4ed7382c7 100644 --- a/pkg/splunk/client/enterprise.go +++ b/pkg/splunk/client/enterprise.go @@ -471,7 +471,7 @@ func (c *SplunkClient) GetIndexerClusterPeerInfo() (*IndexerClusterPeerInfo, err } // ClusterMasterPeerInfo represents the status of a indexer cluster peer (cluster manager
endpoint). -// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmanager.2Fpeers +// See https://docs.splunk.com/Documentation/Splunk/latest/RESTREF/RESTcluster#cluster.2Fmaster.2Fpeers type ClusterMasterPeerInfo struct { // Unique identifier or GUID for the peer ID string `json:"guid"`