diff --git a/.circleci/config.yml b/.circleci/config.yml index b45ff0788..4694167aa 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -43,7 +43,22 @@ workflows: - unit-tests - vulnerability-scan: requires: - - build-image + - build-image + monitoring-console-integration-test: + jobs: + - build-image + - unit-tests + - monitoring-console-tests: + requires: + - build-image + - unit-tests + filters: + branches: + only: + - feature-MC/CRD + - integration-tests: + requires: + - monitoring-console-tests build-and-push: jobs: - build-image @@ -435,3 +450,61 @@ jobs: command: | make cluster-down no_output_timeout: 30m + # Runs integration tests against a k8s cluster + monitoring-console-tests: + executor: ubuntu-machine + steps: + - run: + name: Setup Splunk operator and enterprise image env vars + command: | + echo 'export SPLUNK_OPERATOR_IMAGE=${IMAGE_NAME}:${CIRCLE_SHA1}' >> $BASH_ENV + echo 'export SPLUNK_ENTERPRISE_IMAGE=${ENTERPRISE_IMAGE_NAME}' >> $BASH_ENV + echo 'export COMMIT_HASH=$(echo ${CIRCLE_SHA1:0:7})' >> $BASH_ENV + - kubernetes/install + - aws-cli/install + - aws-eks/install-eksctl + - run: + name: Install kind tool + command: | + curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.7.0/kind-$(uname)-amd64 + chmod +x ./kind + sudo mv ./kind /usr/local/bin + - checkout + - attach_workspace: + name: Restore workspace + at: /tmp + # load the operator image to local registry in the VM + - load_image + - run: + name: Print out version and environment + command: | + ls -al + echo "GO VERSION=`go version`" + echo "CIRCLE_SHA1=$CIRCLE_SHA1" + echo "SPLUNK_OPERATOR_IMAGE=$SPLUNK_OPERATOR_IMAGE" + echo "SPLUNK_ENTERPRISE_IMAGE=$SPLUNK_ENTERPRISE_IMAGE" + echo "PRIVATE_REGISTRY=$PRIVATE_REGISTRY" + echo "CLUSTER_PROVIDER=$CLUSTER_PROVIDER" + - run: + # Deploys an EKS or kind cluster depending on CLUSTER_PROVIDER flag. If cluster already exists, + # it will skip. 
Uses NUM_WORKERS for size of cluster + name: Deploy k8s cluster + command: | + make cluster-up + kubectl version # log the k8s version + no_output_timeout: 30m + - run: + # Run the integration tests against the cluster deployed above. + # Test against the SPLUNK_OPERATOR_IMAGE and SPLUNK_ENTERPRISE_IMAGE + name: Run integration tests + command: | + make int-test + mkdir -p /tmp/test-results + find ./test -name "*junit.xml" -exec cp {} /tmp/test-results \; + environment: + TEST_FOCUS: "monitoring_console" + - store_test_results: + name: Save test results + path: /tmp/test-results + - store_artifacts: + path: /tmp/test-results diff --git a/test/monitoring_console/monitoring_console_test.go b/test/monitoring_console/monitoring_console_test.go index ed6e126af..5545d62ce 100644 --- a/test/monitoring_console/monitoring_console_test.go +++ b/test/monitoring_console/monitoring_console_test.go @@ -50,37 +50,131 @@ var _ = Describe("Monitoring Console test", func() { Context("Deploy Monitoring Console", func() { It("smoke, monitoring_console: can deploy MC CR", func() { + /* + Test Steps + 1. Deploy Monitoring Console + 2. Deploy Standalone + 3. Wait for Monitoring Console status to go back to READY + 4. Verify Standalone configured in Monitoring Console Config Map + 5. 
Verify Monitoring Console Pod has correct peers in Peer List + */ + + // Deploy Monitoring Console CRD mc, err := deployment.DeployMonitoringConsole(deployment.GetName(), "") Expect(err).To(Succeed(), "Unable to deploy Monitoring Console instance") + // Verify Monitoring Console is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Create Standalone Spec and apply + standaloneOneName := deployment.GetName() + mcName := deployment.GetName() + spec := enterprisev1.StandaloneSpec{ + CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + } + standaloneOne, err := deployment.DeployStandaloneWithGivenSpec(standaloneOneName, spec) + Expect(err).To(Succeed(), "Unable to deploy standalone instance") + + // Wait for standalone to be in READY Status + testenv.StandaloneReady(deployment, deployment.GetName(), standaloneOne, testenvInstance) + + // Wait for MC to go to Updating Phase + testenv.VerifyMonitoringConsolePhase(deployment, testenvInstance, deployment.GetName(), splcommon.PhaseUpdating) + // Verify MC is Ready and stays in ready state testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Get contents of MC config map + mcConfigMap, err := testenv.GetMCConfigMap(deployment, testenvInstance.GetName(), mcName) + Expect(err).To(Succeed(), "Unable to get MC config map") + + // Check Standalone is configure in MC Config Map + podName := fmt.Sprintf(testenv.StandalonePod, standaloneOneName, 0) + Expect(testenv.CheckPodNameInString(podName, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) + + // Check Monitoring console is configured with all standalone instances in namespace + peerList := testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) + 
testenvInstance.Log.Info("Peer List", "instance", peerList) + + // Only 1 peer expected in MC peer list + Expect(len(peerList)).To(Equal(1)) + + testenvInstance.Log.Info("Check standalone instance in MC Peer list", "Standalone Pod", podName, "Peer in peer list", peerList[0]) + Expect(strings.Contains(peerList[0], podName)).To(Equal(true)) + }) }) - XContext("Standalone deployment (S1)", func() { + Context("Standalone deployment (S1)", func() { It("monitoring_console, integration: can deploy a MC with standalone instance and update MC with new standalone deployment", func() { + /* + Test Steps + 1. Deploy Standalone + 2. Wait for Standalone to go to READY + 3. Deploy Monitoring Console + 4. Wait for Monitoring Console status to be READY + 5. Verify Standalone configured in Monitoring Console Config Map + 6. Verify Monitoring Console Pod has correct peers in Peer List + 7. Deploy 2nd Standalone + 8. Wait for Second Standalone to be READY + 9. Wait for Monitoring Console status to go UPDATING then READY + 10. Verify both Standalone configured in Monitoring Console Config Map + 11. Verify both Standalone configured in Monitoring Console Pod Peers String + 12. Delete 2nd Standalone + 13. Wait for Monitoring Console to go to UPDATING then READY + 14. Verify only first Standalone configured in Monitoring Console Config Map + 15. 
Verify only first Standalone configured in Monitoring Console Pod Peers String + */ standaloneOneName := deployment.GetName() - standaloneOne, err := deployment.DeployStandalone(standaloneOneName) + mcName := deployment.GetName() + spec := enterprisev1.StandaloneSpec{ + CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + } + standaloneOne, err := deployment.DeployStandaloneWithGivenSpec(standaloneOneName, spec) Expect(err).To(Succeed(), "Unable to deploy standalone instance") // Wait for standalone to be in READY Status testenv.StandaloneReady(deployment, deployment.GetName(), standaloneOne, testenvInstance) - // Wait for Monitoring Console Pod to be in READY status - testenv.MCPodReady(testenvInstance.GetName(), deployment) + // Deploy MC and wait for MC to be READY + mc, err := deployment.DeployMonitoringConsole(deployment.GetName(), "") + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console instance") + + // Verify MC is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Get contents of MC config map + mcConfigMap, err := testenv.GetMCConfigMap(deployment, testenvInstance.GetName(), mcName) + Expect(err).To(Succeed(), "Unable to get MC config map") + + // Check Standalone is configured in MC Config Map + podName := fmt.Sprintf(testenv.StandalonePod, standaloneOneName, 0) + Expect(testenv.CheckPodNameInString(podName, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) // Check Monitoring console is configured with all standalone instances in namespace - peerList := testenv.GetConfiguredPeers(testenvInstance.GetName()) + peerList := testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) testenvInstance.Log.Info("Peer List", "instance", peerList) // Only 1 peer expected in MC peer 
list Expect(len(peerList)).To(Equal(1)) - - podName := fmt.Sprintf(testenv.StandalonePod, standaloneOneName, 0) - testenvInstance.Log.Info("Check standalone instance in MC Peer list", "Standalone Pod", podName, "Peer in peer list", peerList[0]) - Expect(strings.Contains(peerList[0], podName)).To(Equal(true)) + Expect(testenv.CheckPodNameInString(podName, peerList[0])).To(Equal(true)) // Add another standalone instance in namespace testenvInstance.Log.Info("Adding second standalone deployment to namespace") @@ -103,31 +197,43 @@ var _ = Describe("Monitoring Console test", func() { }, }, Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, }, } - standaloneTwo, err := deployment.DeployStandalonewithGivenSpec(standaloneTwoName, standaloneTwoSpec) + standaloneTwo, err := deployment.DeployStandaloneWithGivenSpec(standaloneTwoName, standaloneTwoSpec) Expect(err).To(Succeed(), "Unable to deploy standalone instance ") // Wait for standalone two to be in READY status testenv.StandaloneReady(deployment, standaloneTwoName, standaloneTwo, testenvInstance) - // Wait for Monitoring Console Pod to be in READY status - testenv.MCPodReady(testenvInstance.GetName(), deployment) + // Wait for MC to go to Updating Phase + testenv.VerifyMonitoringConsolePhase(deployment, testenvInstance, deployment.GetName(), splcommon.PhaseUpdating) + + // Verify MC is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Get contents of MC config map + mcConfigMap, err = testenv.GetMCConfigMap(deployment, testenvInstance.GetName(), mcName) + Expect(err).To(Succeed(), "Unable to get MC config map") + + // Check Standalone is configure in MC Config Map + podNameOne := fmt.Sprintf(testenv.StandalonePod, standaloneOneName, 0) + podNameTwo := fmt.Sprintf(testenv.StandalonePod, standaloneTwoName, 0) + Expect(testenv.CheckPodNameInString(podNameOne, 
mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) + Expect(testenv.CheckPodNameInString(podNameTwo, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) // Check Monitoring console is configured with all standalone instances in namespace - peerList = testenv.GetConfiguredPeers(testenvInstance.GetName()) + peerList = testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) testenvInstance.Log.Info("Peer List", "instance", peerList) // Only 2 peers expected in MC peer list Expect(len(peerList)).To(Equal(2)) - // Verify Pod Name in Peer List - podNameOne := fmt.Sprintf(testenv.StandalonePod, standaloneOneName, 0) - podNameTwo := fmt.Sprintf(testenv.StandalonePod, standaloneTwoName, 0) - testenvInstance.Log.Info("Checking Standalone on MC", "Standalone POD Name", podNameOne) - Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), podNameOne), true) - testenvInstance.Log.Info("Checking Standalone on MC", "Standalone POD Name", podNameTwo) - Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), podNameTwo), true) + // Verify both standalone Pod Name in Peer List + Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podNameOne), true) + Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podNameTwo), true) // Delete Standlone TWO of the standalone and ensure MC is updated testenvInstance.Log.Info("Deleting second standalone deployment to namespace", "Standalone Name", standaloneTwoName) @@ -135,45 +241,96 @@ var _ = Describe("Monitoring Console test", func() { err = deployment.DeleteCR(standaloneTwo) Expect(err).To(Succeed(), "Unable to delete standalone instance", "Standalone Name", standaloneTwo) - // Wait for Monitoring Console Pod to be in READY status - testenv.MCPodReady(testenvInstance.GetName(), deployment) + // Wait for MC to go to Updating Phase + testenv.VerifyMonitoringConsolePhase(deployment, testenvInstance, deployment.GetName(), splcommon.PhaseUpdating) + 
+ // Verify MC is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Get contents of MC config map + mcConfigMap, err = testenv.GetMCConfigMap(deployment, testenvInstance.GetName(), mcName) + Expect(err).To(Succeed(), "Unable to get MC config map") + + // Check Standalone One is configure in MC Config Map + Expect(testenv.CheckPodNameInString(podNameOne, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) + + // Check standalone two is removed from MC Config Map + Expect(testenv.CheckPodNameInString(podNameTwo, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(false)) // Check Monitoring console is configured with all standalone instances in namespace - peerList = testenv.GetConfiguredPeers(testenvInstance.GetName()) + peerList = testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) testenvInstance.Log.Info("Peer List", "instance", peerList) // Only 1 peer expected in MC peer list Expect(len(peerList)).To(Equal(1)) - podName = fmt.Sprintf(testenv.StandalonePod, standaloneOneName, 0) - testenvInstance.Log.Info("Check standalone instance in MC Peer list", "Standalone Pod", podName, "Peer in peer list", peerList[0]) - Expect(strings.Contains(peerList[0], podName)).To(Equal(true)) + // Check Only one standalone configured on MC Pod peer list + Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podNameOne), true) + Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podNameTwo), false) }) }) - XContext("Standalone deployment with Scale up", func() { + Context("Standalone deployment with Scale up", func() { It("monitoring_console: can deploy a MC with standalone instance and update MC when standalone is scaled up", func() { + /* + Test Steps + 1. Deploy Standalone + 2. Wait for Standalone to go to READY + 3. Deploy Monitoring Console + 4. Wait for Monitoring Console status to be READY + 5. 
Verify Standalone configured in Monitoring Console Config Map + 6. Verify Monitoring Console Pod has correct peers in Peer List + 7. Scale Standalone to 2 REPLICAS + 8. Wait for Second Standalone POD to come up and PHASE to be READY + 9. Wait for Monitoring Console status to go UPDATING then READY + 10. Verify both Standalone PODS configured in Monitoring Console Config Map + 11. Verify both Standalone configured in Monitoring Console Pod Peers String + */ + + standaloneName := deployment.GetName() + mcName := deployment.GetName() + spec := enterprisev1.StandaloneSpec{ + CommonSplunkSpec: enterprisev1.CommonSplunkSpec{ + Spec: splcommon.Spec{ + ImagePullPolicy: "IfNotPresent", + }, + Volumes: []corev1.Volume{}, + MonitoringConsoleRef: corev1.ObjectReference{ + Name: mcName, + }, + }, + } - standalone, err := deployment.DeployStandalone(deployment.GetName()) - Expect(err).To(Succeed(), "Unable to deploy standalone instance ") + standalone, err := deployment.DeployStandaloneWithGivenSpec(standaloneName, spec) + Expect(err).To(Succeed(), "Unable to deploy standalone instance") - // Wait for Standalone to be in READY status + // Wait for standalone to be in READY Status testenv.StandaloneReady(deployment, deployment.GetName(), standalone, testenvInstance) - // Wait for Monitoring Console Pod to be in READY status - testenv.MCPodReady(testenvInstance.GetName(), deployment) + // Deploy MC and wait for MC to be READY + mc, err := deployment.DeployMonitoringConsole(deployment.GetName(), "") + Expect(err).To(Succeed(), "Unable to deploy Monitoring Console instance") + + // Verify MC is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Get contents of MC config map + mcConfigMap, err := testenv.GetMCConfigMap(deployment, testenvInstance.GetName(), mcName) + Expect(err).To(Succeed(), "Unable to get MC config map") + + // Check Standalone is configure in MC Config Map + podName := 
fmt.Sprintf(testenv.StandalonePod, standaloneName, 0) + Expect(testenv.CheckPodNameInString(podName, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) // Check Monitoring console is configured with all standalone instances in namespace - peerList := testenv.GetConfiguredPeers(testenvInstance.GetName()) + peerList := testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) testenvInstance.Log.Info("Peer List", "instance", peerList) // Only 1 peer expected in MC peer list Expect(len(peerList)).To(Equal(1)) // Check spluk standlone pods are configured in MC peer list - podName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0) - testenvInstance.Log.Info("Check standalone instance in MC Peer list", "Standalone Pod", podName, "Peer in peer list", peerList[0]) - Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), podName)).To(Equal(true)) + Expect(testenv.CheckPodNameInString(podName, peerList[0])).To(Equal(true)) // Scale Standalone instance testenvInstance.Log.Info("Scaling Standalone CR") @@ -193,20 +350,32 @@ var _ = Describe("Monitoring Console test", func() { // Wait for Standalone to be in READY status testenv.StandaloneReady(deployment, deployment.GetName(), standalone, testenvInstance) - // Wait for Monitoring Console Pod to be in READY status - testenv.MCPodReady(testenvInstance.GetName(), deployment) + // Wait for MC to go to Updating Phase + testenv.VerifyMonitoringConsolePhase(deployment, testenvInstance, deployment.GetName(), splcommon.PhaseUpdating) + + // Verify MC is Ready and stays in ready state + testenv.VerifyMonitoringConsoleReady(deployment, deployment.GetName(), mc, testenvInstance) + + // Get contents of MC config map + mcConfigMap, err = testenv.GetMCConfigMap(deployment, testenvInstance.GetName(), mcName) + Expect(err).To(Succeed(), "Unable to get MC config map") + + // Check Standalone is configured in MC Config Map + for i := range []int{0, 1} { + podName := fmt.Sprintf(testenv.StandalonePod, 
standaloneName, i) + Expect(testenv.CheckPodNameInString(podName, mcConfigMap.Data["SPLUNK_STANDALONE_URL"])).To(Equal(true)) + } // Only 2 peer expected in MC peer list - peerList = testenv.GetConfiguredPeers(testenvInstance.GetName()) + peerList = testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) testenvInstance.Log.Info("Peers in configuredPeer List", "count", len(peerList)) Expect(len(peerList)).To(Equal(2)) // Verify Pod Name in Peer List - podNameTwo := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 1) - testenvInstance.Log.Info("Checking Standalone on MC", "Standalone POD Name", podName) - Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), podName), true) - testenvInstance.Log.Info("Checking Standalone on MC", "Standalone POD Name", podNameTwo) - Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), podNameTwo), true) + for i := range []int{0, 1} { + podName := fmt.Sprintf(testenv.StandalonePod, standaloneName, i) + Expect(testenv.CheckPodNameInString(podName, peerList[i])).To(Equal(true)) + } }) }) @@ -234,7 +403,7 @@ var _ = Describe("Monitoring Console test", func() { for i := 0; i < defaultSHReplicas; i++ { podName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) testenvInstance.Log.Info("Checking for Search Head on MC", "Search Head Name", podName) - found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), podName) + found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podName) Expect(found).To(Equal(true)) } @@ -243,7 +412,7 @@ var _ = Describe("Monitoring Console test", func() { podName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), i) podIP := testenv.GetPodIP(testenvInstance.GetName(), podName) testenvInstance.Log.Info("Checking for Indexer Pod on MC", "Search Head Name", podName, "IP Address", podIP) - found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), podIP) + found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), 
deployment.GetName(), podIP) Expect(found).To(Equal(true)) } @@ -303,13 +472,13 @@ var _ = Describe("Monitoring Console test", func() { // Check Standalone configured on Monitoring Console podName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0) testenvInstance.Log.Info("Check standalone instance in MC Peer list", "Standalone Pod", podName) - Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), podName)).To(Equal(true)) + Expect(testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podName)).To(Equal(true)) // Verify all Search Head Members are configured on Monitoring Console for i := 0; i < scaledSHReplicas; i++ { podName := fmt.Sprintf(testenv.SearchHeadPod, deployment.GetName(), i) testenvInstance.Log.Info("Checking for Search Head on MC after adding Standalone", "Search Head Name", podName) - found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), podName) + found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podName) Expect(found).To(Equal(true)) } @@ -318,7 +487,7 @@ var _ = Describe("Monitoring Console test", func() { podName := fmt.Sprintf(testenv.IndexerPod, deployment.GetName(), i) podIP := testenv.GetPodIP(testenvInstance.GetName(), podName) testenvInstance.Log.Info("Checking for Indexer Pod on MC", "Search Head Name", podName, "IP Address", podIP) - found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), podIP) + found := testenv.CheckPodNameOnMC(testenvInstance.GetName(), deployment.GetName(), podIP) Expect(found).To(Equal(true)) } }) diff --git a/test/scaling_test/scaling_test.go b/test/scaling_test/scaling_test.go index 7af1318b6..660965c35 100644 --- a/test/scaling_test/scaling_test.go +++ b/test/scaling_test/scaling_test.go @@ -62,7 +62,7 @@ var _ = Describe("Scaling test", func() { // testenv.MCPodReady(testenvInstance.GetName(), deployment) // Check Monitoring console is configured with all standalone instances in namespace - peerList := 
testenv.GetConfiguredPeers(testenvInstance.GetName()) + peerList := testenv.GetConfiguredPeers(testenvInstance.GetName(), deployment.GetName()) testenvInstance.Log.Info("Peer List", "instance", peerList) // Scale Standalone instance diff --git a/test/smoke/smoke_test.go b/test/smoke/smoke_test.go index e5a48494e..31aeaed07 100644 --- a/test/smoke/smoke_test.go +++ b/test/smoke/smoke_test.go @@ -160,7 +160,7 @@ var _ = Describe("Smoke test", func() { } // Create standalone Deployment with License Master - standalone, err := deployment.DeployStandalonewithGivenSpec(deployment.GetName(), standaloneSpec) + standalone, err := deployment.DeployStandaloneWithGivenSpec(deployment.GetName(), standaloneSpec) Expect(err).To(Succeed(), "Unable to deploy standalone instance with LM") // Wait for Standalone to be in READY status diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index dc42616f7..1215474b4 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -458,8 +458,8 @@ func (d *Deployment) DeployStandaloneWithLM(name string) (*enterprisev1.Standalo return deployed.(*enterprisev1.Standalone), err } -// DeployStandalonewithGivenSpec deploys a standalone with given spec -func (d *Deployment) DeployStandalonewithGivenSpec(name string, spec enterprisev1.StandaloneSpec) (*enterprisev1.Standalone, error) { +// DeployStandaloneWithGivenSpec deploys a standalone with given spec +func (d *Deployment) DeployStandaloneWithGivenSpec(name string, spec enterprisev1.StandaloneSpec) (*enterprisev1.Standalone, error) { standalone := newStandaloneWithGivenSpec(name, d.testenv.namespace, spec) deployed, err := d.deployCR(name, standalone) if err != nil { diff --git a/test/testenv/mcutil.go b/test/testenv/mcutil.go index 087bfc58b..9cf687414 100644 --- a/test/testenv/mcutil.go +++ b/test/testenv/mcutil.go @@ -20,6 +20,9 @@ import ( "os/exec" "strings" + "github.com/splunk/splunk-operator/pkg/splunk/enterprise" + corev1 "k8s.io/api/core/v1" + gomega 
"github.com/onsi/gomega" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -73,8 +76,8 @@ func CheckMCPodReady(ns string) bool { } // GetConfiguredPeers get list of Peers Configured on Montioring Console -func GetConfiguredPeers(ns string) []string { - podName := fmt.Sprintf(MonitoringConsolePod, ns, 0) +func GetConfiguredPeers(ns string, deploymentName string) []string { + podName := fmt.Sprintf(MonitoringConsolePod, deploymentName, 0) var peerList []string if len(podName) > 0 { peerFile := "/opt/splunk/etc/apps/splunk_monitoring_console/local/splunk_monitoring_console_assets.conf" @@ -132,9 +135,9 @@ func MCPodReady(ns string, deployment *Deployment) { } // CheckPodNameOnMC Check Standalone Pod configured on MC -func CheckPodNameOnMC(ns string, podName string) bool { +func CheckPodNameOnMC(ns string, deploymentName string, podName string) bool { // Get Peers configured on Monitoring Console - peerList := GetConfiguredPeers(ns) + peerList := GetConfiguredPeers(ns, deploymentName) logf.Log.Info("Peer List", "instance", peerList) found := false for _, peer := range peerList { @@ -163,3 +166,21 @@ func GetPodIP(ns string, podName string) string { } return restResponse.Status.PodIP } + +// GetMCConfigMap gets config map for give Monitoring Console Name +func GetMCConfigMap(deployment *Deployment, ns string, mcName string) (*corev1.ConfigMap, error) { + mcConfigMapName := enterprise.GetSplunkMonitoringconsoleConfigMapName(deployment.GetName(), enterprise.SplunkMonitoringConsole) + mcConfigMap, err := GetConfigMap(deployment, ns, mcConfigMapName) + if err != nil { + logf.Log.Error(err, "Failed to get Monitoring Console Config Map") + return mcConfigMap, err + } + logf.Log.Info("MC Config Map contents", "Data", mcConfigMap.Data) + return mcConfigMap, err +} + +// CheckPodNameInString checks for pod name in string +func CheckPodNameInString(podName string, configString string) bool { + logf.Log.Info("Check MC Config String has Pod configured", "Monitoring Console Config 
Map Pod Config String", configString, "POD String", podName) + return strings.Contains(configString, podName) +} diff --git a/test/testenv/util.go b/test/testenv/util.go index df2f6684b..dc136fd82 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -578,3 +578,13 @@ func ExecuteCommandOnPod(deployment *Deployment, podName string, stdin string) ( logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr) return stdout, nil } + +// GetConfigMap Gets the config map for a given k8 config map name +func GetConfigMap(deployment *Deployment, ns string, configMapName string) (*corev1.ConfigMap, error) { + configMap := &corev1.ConfigMap{} + err := deployment.GetInstance(configMapName, configMap) + if err != nil { + deployment.testenv.Log.Error(err, "Unable to get config map", "Config Map Name", configMap, "Namespace", ns) + } + return configMap, err +} diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go index f326cfc75..04c0bb86c 100644 --- a/test/testenv/verificationutils.go +++ b/test/testenv/verificationutils.go @@ -405,6 +405,20 @@ func VerifyStandalonePhase(deployment *Deployment, testenvInstance *TestEnv, crN }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) } +// VerifyMonitoringConsolePhase verify the phase of Monitoring Console CR +func VerifyMonitoringConsolePhase(deployment *Deployment, testenvInstance *TestEnv, crName string, phase splcommon.Phase) { + gomega.Eventually(func() splcommon.Phase { + mc := &enterprisev1.MonitoringConsole{} + err := deployment.GetInstance(crName, mc) + if err != nil { + return splcommon.PhaseError + } + testenvInstance.Log.Info("Waiting for monitoring console CR status", "instance", mc.ObjectMeta.Name, "Expected", phase, " Actual Phase", mc.Status.Phase) + DumpGetPods(testenvInstance.GetName()) + return mc.Status.Phase + }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(phase)) +} + // 
VerifyCPULimits verifies value of CPU limits is as expected func VerifyCPULimits(deployment *Deployment, ns string, podName string, expectedCPULimits string) { gomega.Eventually(func() bool {