diff --git a/.circleci/config.yml b/.circleci/config.yml
index d03f247a3..3e24f8dec 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -282,7 +282,7 @@ jobs:
             mkdir -p /tmp/test-results
             find ./test -name "*junit.xml" -exec cp {} /tmp/test-results \;
           environment:
-            TEST_FOCUS: "smoke|ingest_search|monitoring_console|deletecr"
+            TEST_FOCUS: "smoke|ingest_search|monitoring_console|deletecr|smartstore"
       - store_test_results:
           name: Save test results
           path: /tmp/test-results
diff --git a/test/env.sh b/test/env.sh
index b025153d7..513758f3e 100644
--- a/test/env.sh
+++ b/test/env.sh
@@ -10,8 +10,12 @@
 : "${ECR_REGISTRY:=}"
 : "${VPC_PUBLIC_SUBNET_STRING:=}"
 : "${VPC_PRIVATE_SUBNET_STRING:=}"
+# The env variables below are required to run the license master test cases
 : "${ENTERPRISE_LICENSE_PATH:=}"
 : "${TEST_S3_BUCKET:=}"
+# The env variables below are required to run the remote indexes test cases
+: "${INDEXES_S3_BUCKET:=}"
+: "${AWS_S3_REGION:=}"
 
 # Docker registry to use to push the test images to and pull from in the cluster
 if [ -z "${PRIVATE_REGISTRY}" ]; then
diff --git a/test/run-tests.sh b/test/run-tests.sh
index fd53fdfcc..8f8074bc6 100755
--- a/test/run-tests.sh
+++ b/test/run-tests.sh
@@ -91,10 +91,23 @@
 if [[ -z "${ENTERPRISE_LICENSE_LOCATION}" ]]; then
   echo "License path not set. Changing to default"
   export ENTERPRISE_LICENSE_LOCATION="${ENTERPRISE_LICENSE_PATH}"
 fi
+
+# Set S3 env variables
 if [[ -z "${TEST_BUCKET}" ]]; then
-  echo "Test bucket not set. Changing to default"
+  echo "Data bucket not set. Changing to default"
   export TEST_BUCKET="${TEST_S3_BUCKET}"
 fi
+if [[ -z "${TEST_INDEXES_S3_BUCKET}" ]]; then
+  echo "Indexes bucket not set. Changing to default"
+  export TEST_INDEXES_S3_BUCKET="${INDEXES_S3_BUCKET}"
+fi
+
+if [[ -z "${S3_REGION}" ]]; then
+  echo "S3 Region not set. Changing to default"
+  export S3_REGION="${AWS_S3_REGION}"
+fi
+
+
 # Running only smoke test cases by default or value passed through TEST_FOCUS env variable. To run different test packages add/remove path from focus argument or TEST_FOCUS variable
 ginkgo -v -progress -r -stream -nodes=${NUM_NODES} --focus="${TEST_TO_RUN}" ${topdir}/test -- -commit-hash=${COMMIT_HASH} -operator-image=${PRIVATE_SPLUNK_OPERATOR_IMAGE} -splunk-image=${PRIVATE_SPLUNK_ENTERPRISE_IMAGE}
\ No newline at end of file
diff --git a/test/smartstore/smartstore_suite_test.go b/test/smartstore/smartstore_suite_test.go
new file mode 100644
index 000000000..fdbcc2a6d
--- /dev/null
+++ b/test/smartstore/smartstore_suite_test.go
@@ -0,0 +1,47 @@
+package smartstore
+
+import (
+    "testing"
+    "time"
+
+    . "github.com/onsi/ginkgo"
+    "github.com/onsi/ginkgo/reporters"
+    . "github.com/onsi/gomega"
"github.com/onsi/gomega" + + "github.com/splunk/splunk-operator/test/testenv" +) + +const ( + // PollInterval specifies the polling interval + PollInterval = 5 * time.Second + + // ConsistentPollInterval is the interval to use to consistently check a state is stable + ConsistentPollInterval = 200 * time.Millisecond + ConsistentDuration = 2000 * time.Millisecond +) + +var ( + testenvInstance *testenv.TestEnv + testSuiteName = "smartore-" + testenv.RandomDNSName(2) +) + +// TestBasic is the main entry point +func TestBasic(t *testing.T) { + + RegisterFailHandler(Fail) + + junitReporter := reporters.NewJUnitReporter(testSuiteName + "_junit.xml") + RunSpecsWithDefaultAndCustomReporters(t, "Running "+testSuiteName, []Reporter{junitReporter}) +} + +var _ = BeforeSuite(func() { + var err error + testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName) + Expect(err).ToNot(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + if testenvInstance != nil { + Expect(testenvInstance.Teardown()).ToNot(HaveOccurred()) + } +}) diff --git a/test/smartstore/smartstore_test.go b/test/smartstore/smartstore_test.go new file mode 100644 index 000000000..e37e28e7b --- /dev/null +++ b/test/smartstore/smartstore_test.go @@ -0,0 +1,67 @@ +package smartstore + +import ( + "fmt" + "os/exec" + "strings" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/splunk/splunk-operator/test/testenv" +) + +func dumpGetPods(ns string) { + output, _ := exec.Command("kubectl", "get", "pod", "-n", ns).Output() + for _, line := range strings.Split(string(output), "\n") { + testenvInstance.Log.Info(line) + } +} + +var _ = Describe("Smoke test", func() { + + var deployment *testenv.Deployment + + BeforeEach(func() { + var err error + deployment, err = testenvInstance.NewDeployment(testenv.RandomDNSName(3)) + Expect(err).To(Succeed(), "Unable to create deployment") + }) + + AfterEach(func() { + // When a test spec failed, skip the teardown so we can troubleshoot. 
+        if CurrentGinkgoTestDescription().Failed {
+            testenvInstance.SkipTeardown = true
+        }
+        if deployment != nil {
+            deployment.Teardown()
+        }
+    })
+
+    Context("Configure indexes on standalone deployment using CR Spec", func() {
+        It("smartstore: Can configure indexes through app", func() {
+            volumeName := "test-volume-" + testenv.RandomDNSName(3)
+            indexName := "test-index-" + testenv.RandomDNSName(3)
+            testenvInstance.Log.Info("index secret name ", "secret name ", testenvInstance.GetIndexSecretName())
+            standalone, err := deployment.DeployStandaloneWithIndexes(deployment.GetName(), testenvInstance.GetIndexSecretName(), deployment.GetName(), volumeName, indexName)
+            Expect(err).To(Succeed(), "Unable to deploy standalone instance")
+
+            // Verify standalone goes to ready state
+            testenv.StandaloneReady(deployment, deployment.GetName(), standalone, testenvInstance)
+
+            // Check index on pod
+            podName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)
+            testenv.VerifyIndexFoundOnPod(deployment, podName, indexName)
+
+            // Ingest data to the index
+            logFile := "/opt/splunk/var/log/splunk/splunkd.log"
+            testenv.IngestFileViaMonitor(logFile, indexName, podName, deployment)
+
+            // Roll hot buckets to warm on the test index
+            testenv.RollHotToWarm(deployment, podName, indexName)
+
+            // Check for index on S3
+            testenv.VerifyIndexExistsOnS3(deployment, podName, indexName)
+        })
+    })
+})
diff --git a/test/testenv/cmutil.go b/test/testenv/cmutil.go
index ab880255d..119025d02 100644
--- a/test/testenv/cmutil.go
+++ b/test/testenv/cmutil.go
@@ -19,8 +19,31 @@ import (
     "fmt"
 
     logf "sigs.k8s.io/controller-runtime/pkg/log"
+    "strings"
 )
 
+// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master
+// Endpoint: /services/cluster/master/sites
+type ClusterMasterSitesResponse struct {
+    Entries []ClusterMasterSitesEntry `json:"entry"`
+}
+
+// ClusterMasterSitesEntry represents a site of an indexer cluster with its metadata
+type ClusterMasterSitesEntry struct {
+    Name    string                    `json:"name"`
+    Content ClusterMasterSitesContent `json:"content"`
+}
+
+// ClusterMasterSitesContent represents detailed information about a site
+type ClusterMasterSitesContent struct {
+    Peers map[string]ClusterMasterSitesPeer `json:"peers"`
+}
+
+// ClusterMasterSitesPeer represents an indexer peer member of a site
+type ClusterMasterSitesPeer struct {
+    ServerName string `json:"server_name"`
+}
+
 // ClusterMasterHealthResponse is a representation of the health response by a Splunk cluster-master
 // Endpoint: /services/cluster/master/health
 type ClusterMasterHealthResponse struct {
@@ -122,24 +145,52 @@ func CheckSearchHeadRemoved(deployment *Deployment) bool {
     return searchHeadRemoved
 }
 
-// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master
-type ClusterMasterSitesResponse struct {
-    Entries []ClusterMasterSitesEntry `json:"entry"`
-}
-
-// ClusterMasterSitesEntry represents a site of an indexer cluster with its metadata
-type ClusterMasterSitesEntry struct {
-    Name    string                    `json:"name"`
-    Content ClusterMasterSitesContent `json:"content"`
+// RollHotBuckets rolls hot buckets in the cluster via a rolling restart of the cluster peers
+func RollHotBuckets(deployment *Deployment) bool {
+    podName := fmt.Sprintf("splunk-%s-cluster-master-0", deployment.GetName())
+    stdin := "/opt/splunk/bin/splunk rolling-restart cluster-peers -auth admin:$(cat /mnt/splunk-secrets/password)"
+    command := []string{"/bin/sh"}
+    stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+    if err != nil {
+        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+        return false
+    }
+    logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+    if strings.Contains(stdout, "Rolling restart of all cluster peers has been initiated.") {
+        return true
+    }
+    return false
 }
 
-// ClusterMasterSitesContent represents detailed information about a site
-type ClusterMasterSitesContent struct {
-    Peers map[string]ClusterMasterSitesPeer `json:"peers"`
+// RollingRestartEndpointResponse is a representation of the /services/cluster/master/info endpoint
+type RollingRestartEndpointResponse struct {
+    Entry []struct {
+        Content struct {
+            RollingRestartFlag bool `json:"rolling_restart_flag"`
+        } `json:"content"`
+    } `json:"entry"`
 }
 
-// ClusterMasterSitesPeer reprensents an indexer peer member of a site
-type ClusterMasterSitesPeer struct {
-    ServerName string `json:"server_name"`
+// CheckRollingRestartStatus checks if a rolling restart is in progress in the cluster
+func CheckRollingRestartStatus(deployment *Deployment) bool {
+    podName := fmt.Sprintf("splunk-%s-cluster-master-0", deployment.GetName())
+    stdin := "curl -ks -u admin:$(cat /mnt/splunk-secrets/password) https://localhost:8089/services/cluster/master/info?output_mode=json"
+    command := []string{"/bin/sh"}
+    stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+    if err != nil {
+        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+        return false
+    }
+    logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+    restResponse := RollingRestartEndpointResponse{}
+    err = json.Unmarshal([]byte(stdout), &restResponse)
+    if err != nil {
+        logf.Log.Error(err, "Failed to parse cluster master info response")
+        return false
+    }
+    rollingRestart := true
+    for _, entry := range restResponse.Entry {
+        rollingRestart = entry.Content.RollingRestartFlag
+    }
+    return rollingRestart
 }
diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go
index 38fc92342..005774391 100644
--- a/test/testenv/deployment.go
+++ b/test/testenv/deployment.go
@@ -34,6 +34,7 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client/config"
 
     enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1beta1"
+    splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
 )
 
 // Deployment simply represents the deployment (standalone, clustered,...etc) we create on the testenv
@@ -427,3 +428,33 @@ func (d *Deployment) DeployStandalonewithGivenSpec(name string, spec enterprisev
     }
     return deployed.(*enterprisev1.Standalone), err
 }
+
+// DeployStandaloneWithIndexes deploys a standalone splunk enterprise instance with remote indexes configured on the specified testenv
+func (d *Deployment) DeployStandaloneWithIndexes(name string, indexesSecret string, standaloneName string, volumeName string, indexName string) (*enterprisev1.Standalone, error) {
+
+    s3Endpoint := "https://s3-" + s3Region + ".amazonaws.com"
+    volumeSpec := GenerateIndexVolumeSpec(volumeName, s3Endpoint, testIndexesS3Bucket, indexesSecret)
+    indexSpec := GenerateIndexSpec(indexName, volumeName)
+    spec := enterprisev1.StandaloneSpec{
+        CommonSplunkSpec: enterprisev1.CommonSplunkSpec{
+            Spec: splcommon.Spec{
+                ImagePullPolicy: "IfNotPresent",
+            },
+            Volumes: []corev1.Volume{},
+        },
+        SmartStore: enterprisev1.SmartStoreSpec{
+            VolList: []enterprisev1.VolumeSpec{
+                volumeSpec,
+            },
+            IndexList: []enterprisev1.IndexSpec{
+                indexSpec,
+            },
+        },
+    }
+    standalone := newStandaloneWithSpec(name, d.testenv.namespace, spec)
+    deployed, err := d.deployCR(name, standalone)
+    if err != nil {
+        return nil, err
+    }
+    return deployed.(*enterprisev1.Standalone), err
+}
diff --git a/test/testenv/ingest_utils.go b/test/testenv/ingest_utils.go
index f215a9a4c..f2a6a99a2 100644
--- a/test/testenv/ingest_utils.go
+++ b/test/testenv/ingest_utils.go
@@ -222,3 +222,33 @@ func CopyFileToPod(podName string, srcPath string, destPath string, deployment *
 
 //go:linkname cpMakeTar k8s.io/kubernetes/pkg/kubectl/cmd/cp.makeTar
 func cpMakeTar(srcPath, destPath string, writer io.Writer) error
+
+// IngestFileViaMonitor ingests a file into an instance using the add monitor CLI
+func IngestFileViaMonitor(logFile string, indexName string, podName string, deployment *Deployment) error {
+
+    // Monitor log into specified index
+    var addMonitorCmd strings.Builder
+    splunkBin := "/opt/splunk/bin/splunk"
+    username := "admin"
+    password := "$(cat /mnt/splunk-secrets/password)"
+    splunkCmd := "add monitor"
+
+    fmt.Fprintf(&addMonitorCmd, "%s %s %s -index %s -auth %s:%s", splunkBin, splunkCmd, logFile, indexName, username, password)
+    command := []string{"/bin/bash"}
+    stdin := addMonitorCmd.String()
+    addMonitorResp, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+    if err != nil {
+        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "stdin", stdin, "addMonitorResp", addMonitorResp, "stderr", stderr)
+        return err
+    }
+
+    // Validate the expected CLI response
+    var expectedResp strings.Builder
+    fmt.Fprintf(&expectedResp, "Added monitor of '%s'", logFile)
+    if !strings.Contains(addMonitorResp, expectedResp.String()) {
+        logf.Log.Error(err, "Unexpected response to add monitor on splunk", "pod", podName, "addMonitorResp", addMonitorResp)
+        return fmt.Errorf("unexpected response from add monitor on pod %s: %s", podName, addMonitorResp)
+    }
+    logf.Log.Info("File ingested via add monitor successfully", "logFile", logFile, "addMonitorResp", addMonitorResp)
+    return nil
+}
diff --git a/test/testenv/lmutil.go b/test/testenv/lmutil.go
index d2feaecf4..d2eea6f21 100644
--- a/test/testenv/lmutil.go
+++ b/test/testenv/lmutil.go
@@ -16,15 +16,8 @@ package testenv
 
 import (
     "encoding/json"
-    "fmt"
-    "os"
     "strings"
 
-    "github.com/aws/aws-sdk-go/aws"
-    "github.com/aws/aws-sdk-go/aws/session"
-    "github.com/aws/aws-sdk-go/service/s3"
-    "github.com/aws/aws-sdk-go/service/s3/s3manager"
-
     logf "sigs.k8s.io/controller-runtime/pkg/log"
 )
 
@@ -62,30 +55,3 @@ func CheckLicenseMasterConfigured(deployment *Deployment, podName string) bool {
     logf.Log.Info("License Master configuration on POD", "POD", podName, "License Master", licenseMaster)
     return strings.Contains(licenseMaster, "license-master-service:8089")
 }
-
-// DownloadFromS3Bucket downloads license file from S3
-func DownloadFromS3Bucket() (string, error) {
-    dataBucket := os.Getenv("TEST_BUCKET")
-    location := os.Getenv("ENTERPRISE_LICENSE_LOCATION")
-    fmt.Printf("%s : dataBucket %s : location\n", os.Getenv("TEST_BUCKET"), os.Getenv("ENTERPRISE_LICENSE_LOCATION"))
-    item := "enterprise.lic"
-    file, err := os.Create(item)
-    if err != nil {
-        logf.Log.Error(err, "Failed to create license file")
-    }
-    defer file.Close()
-
-    sess, _ := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
-    downloader := s3manager.NewDownloader(sess)
-    numBytes, err := downloader.Download(file,
-        &s3.GetObjectInput{
-            Bucket: aws.String(dataBucket),
-            Key:    aws.String(location + "/" + "enterprise.lic"),
-        })
-    if err != nil {
-        logf.Log.Error(err, "Failed to download license file")
-    }
-
-    logf.Log.Info("Downloaded", "filename", file.Name(), "bytes", numBytes)
-    return file.Name(), err
-}
diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go
new file mode 100644
index 000000000..11f0344e6
--- /dev/null
+++ b/test/testenv/remote_index_utils.go
@@ -0,0 +1,88 @@
+package testenv
+
+import (
+    "encoding/json"
+
+    enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1beta1"
+    logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// dataIndexesResponse represents the response of the /services/data/indexes endpoint
+type dataIndexesResponse struct {
+    Entry []struct {
+        Name string `json:"name"`
+    } `json:"entry"`
+}
+
+// GetIndexOnPod checks whether the given index is present on the given pod
+func GetIndexOnPod(deployment *Deployment, podName string, indexName string) bool {
+    stdin := "curl -ks -u admin:$(cat /mnt/splunk-secrets/password) https://localhost:8089/services/data/indexes?output_mode=json"
+    command := []string{"/bin/sh"}
+    stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+    if err != nil {
+        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+        return false
+    }
+    logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+    restResponse := dataIndexesResponse{}
+    err = json.Unmarshal([]byte(stdout), &restResponse)
+    if err != nil {
+        logf.Log.Error(err, "Failed to parse data/indexes response")
+        return false
+    }
+    indexFound := false
+    for _, entry := range restResponse.Entry {
+        if entry.Name == indexName {
+            indexFound = true
+            break
+        }
+    }
+    return indexFound
+}
+
+// RestartSplunk restarts splunk inside the container
+func RestartSplunk(deployment *Deployment, podName string) bool {
+    stdin := "/opt/splunk/bin/splunk restart -auth admin:$(cat /mnt/splunk-secrets/password)"
+    command := []string{"/bin/sh"}
+    stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+    if err != nil {
+        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+        return false
+    }
+    logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+    return true
+}
+
+// RollHotToWarm rolls hot buckets to warm for a given index and pod
+func RollHotToWarm(deployment *Deployment, podName string, indexName string) bool {
+    stdin := "/opt/splunk/bin/splunk _internal call /data/indexes/" + indexName + "/roll-hot-buckets -auth admin:$(cat /mnt/splunk-secrets/password)"
+    command := []string{"/bin/sh"}
+    stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+    if err != nil {
+        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+        return false
+    }
+    logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+    return true
+}
+
+// GenerateIndexVolumeSpec returns a VolumeSpec struct with the given values
+func GenerateIndexVolumeSpec(volumeName string, endpoint string, path string, secretRef string) enterprisev1.VolumeSpec {
+    return enterprisev1.VolumeSpec{
+        Name:      volumeName,
+        Endpoint:  endpoint,
+        Path:      path,
+        SecretRef: secretRef,
+    }
+}
+
+// GenerateIndexSpec returns an IndexSpec struct with the given values
+func GenerateIndexSpec(indexName string, volName string) enterprisev1.IndexSpec {
+    return enterprisev1.IndexSpec{
+        Name:       indexName,
+        RemotePath: indexName,
+        IndexAndGlobalCommonSpec: enterprisev1.IndexAndGlobalCommonSpec{
+            VolName: volName,
+        },
+    }
+}
diff --git a/test/testenv/s3utils.go b/test/testenv/s3utils.go
new file mode 100644
index 000000000..acf885bea
--- /dev/null
+++ b/test/testenv/s3utils.go
@@ -0,0 +1,75 @@
+package testenv
+
+import (
+    "os"
+    "strings"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/session"
+    "github.com/aws/aws-sdk-go/service/s3"
+    "github.com/aws/aws-sdk-go/service/s3/s3manager"
+
+    logf "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// Set S3 Variables
+var (
+    s3Region                  = os.Getenv("S3_REGION")
+    testS3Bucket              = os.Getenv("TEST_BUCKET")
+    testIndexesS3Bucket       = os.Getenv("TEST_INDEXES_S3_BUCKET")
+    enterpriseLicenseLocation = os.Getenv("ENTERPRISE_LICENSE_LOCATION")
+)
+
+// CheckPrefixExistsOnS3 checks whether the given prefix exists in the test indexes S3 bucket
+func CheckPrefixExistsOnS3(prefix string) bool {
+    dataBucket := testIndexesS3Bucket
+    sess, err := session.NewSession(&aws.Config{Region: aws.String(s3Region)})
+    if err != nil {
+        logf.Log.Error(err, "Failed to create s3 session")
+    }
+    svc := s3.New(session.Must(sess, err))
+    resp, err := svc.ListObjects(&s3.ListObjectsInput{
+        Bucket: aws.String(dataBucket),
+        Prefix: aws.String(prefix),
+    })
+
+    if err != nil {
+        logf.Log.Error(err, "Failed to list objects on s3 bucket")
+        return false
+    }
+
+    for _, key := range resp.Contents {
+        logf.Log.Info("CHECKING KEY ", "KEY", *key.Key)
+        if strings.Contains(*key.Key, prefix) {
+            logf.Log.Info("Prefix found on bucket", "Prefix", prefix, "KEY", *key.Key)
+            return true
+        }
+    }
+
+    return false
+}
+
+// DownloadFromS3Bucket downloads the license file from S3
+func DownloadFromS3Bucket() (string, error) {
+    dataBucket := testS3Bucket
+    location := enterpriseLicenseLocation
+    item := "enterprise.lic"
+    file, err := os.Create(item)
+    if err != nil {
+        logf.Log.Error(err, "Failed to create license file")
+    }
+    defer file.Close()
+    sess, _ := session.NewSession(&aws.Config{Region: aws.String(s3Region)})
+    downloader := s3manager.NewDownloader(sess)
+    numBytes, err := downloader.Download(file,
+        &s3.GetObjectInput{
+            Bucket: aws.String(dataBucket),
+            Key:    aws.String(location + "/" + "enterprise.lic"),
+        })
+    if err != nil {
+        logf.Log.Error(err, "Failed to download license file")
+    }
+
+    logf.Log.Info("Downloaded", "filename", file.Name(), "bytes", numBytes)
+    return file.Name(), err
+}
diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go
index 19178c298..d366cf586 100644
--- a/test/testenv/testenv.go
+++ b/test/testenv/testenv.go
@@ -18,6 +18,7 @@ import (
     "context"
     "flag"
     "fmt"
+    "os"
     "time"
 
     "github.com/go-logr/logr"
@@ -102,6 +103,7 @@ type TestEnv struct {
     SkipTeardown    bool
     licenseFilePath string
     licenseCMName   string
+    s3IndexSecret   string
     kubeClient      client.Client
     Log             logr.Logger
     cleanupFuncs    []cleanupFunc
@@ -159,6 +161,7 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, sparkImage, licens
     SkipTeardown:    specifiedSkipTeardown,
     licenseCMName:   envName,
     licenseFilePath: licenseFilePath,
+    s3IndexSecret:   "splunk-s3-index-" + envName,
     }
 
     testenv.Log = logf.Log.WithValues("testenv", testenv.name)
@@ -242,6 +245,9 @@
         return err
     }
 
+    // Create s3 secret object for index test
+    testenv.createIndexSecret()
+
     if testenv.licenseFilePath != "" {
         err = testenv.createLicenseConfigMap()
         if err != nil {
@@ -495,7 +501,6 @@
 
 // Create a service account config
 func newServiceAccount(ns string, serviceAccountName string) *corev1.ServiceAccount {
-
     new := corev1.ServiceAccount{
         TypeMeta: metav1.TypeMeta{
             Kind: "ServiceAccount",
@@ -528,6 +533,34 @@ func (testenv *TestEnv) CreateServiceAccount(name string) error {
     return nil
 }
 
+// createIndexSecret creates the s3 index secret object used by the smartstore tests
+func (testenv *TestEnv) createIndexSecret() error {
+    secretName := testenv.s3IndexSecret
+    ns := testenv.namespace
+    data := map[string][]byte{"s3_access_key": []byte(os.Getenv("AWS_ACCESS_KEY_ID")),
+        "s3_secret_key": []byte(os.Getenv("AWS_SECRET_ACCESS_KEY"))}
+    secret := newSecretSpec(ns, secretName, data)
+    if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil {
+        testenv.Log.Error(err, "Unable to create s3 index secret object")
+        return err
+    }
+
+    testenv.pushCleanupFunc(func() error {
+        err := testenv.GetKubeClient().Delete(context.TODO(), secret)
+        if err != nil {
+            testenv.Log.Error(err, "Unable to delete s3 index secret object")
+            return err
+        }
+        return nil
+    })
+    return nil
+}
+
+// GetIndexSecretName returns the index secret object name
+func (testenv *TestEnv) GetIndexSecretName() string {
+    return testenv.s3IndexSecret
+}
+
 // NewDeployment creates a new deployment
 func (testenv *TestEnv) NewDeployment(name string) (*Deployment, error) {
     d := Deployment{
diff --git a/test/testenv/util.go b/test/testenv/util.go
index c2d7b537f..0671c4bd5 100644
--- a/test/testenv/util.go
+++ b/test/testenv/util.go
@@ -405,6 +405,41 @@ func newStandaloneWithLM(name, ns string, licenseMasterName string) *enterprisev
     return &new
 }
 
+// newSecretSpec creates the spec for the smartstore secret object
+func newSecretSpec(ns string, secretName string, data map[string][]byte) *corev1.Secret {
+    secret := &corev1.Secret{
+        TypeMeta: metav1.TypeMeta{
+            Kind:       "Secret",
+            APIVersion: "v1",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      secretName,
+            Namespace: ns,
+        },
+        Data: data,
+        Type: "Opaque",
+    }
+    return secret
+}
+
+// newStandaloneWithSpec creates and initializes CR for Standalone Kind with given spec
+func newStandaloneWithSpec(name, ns string, spec enterprisev1.StandaloneSpec) *enterprisev1.Standalone {
+
+    new := enterprisev1.Standalone{
+        TypeMeta: metav1.TypeMeta{
+            Kind: "Standalone",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            Name:       name,
+            Namespace:  ns,
+            Finalizers: []string{"enterprise.splunk.com/delete-pvc"},
+        },
+
+        Spec: spec,
+    }
+    return &new
+}
+
 // DumpGetPods prints list of pods in the namespace
 func DumpGetPods(ns string) {
     output, err := exec.Command("kubectl", "get", "pods", "-n", ns).Output()
diff --git a/test/testenv/verificationutils.go b/test/testenv/verificationutils.go
index 9cf12ed36..ced6318d3 100644
--- a/test/testenv/verificationutils.go
+++ b/test/testenv/verificationutils.go
@@ -254,3 +254,21 @@ func VerifyServiceAccountConfiguredOnPod(deployment *Deployment, ns string, podN
         return strings.Contains(serviceAccount, restResponse.Spec.ServiceAccount)
     }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
 }
+
+// VerifyIndexFoundOnPod verifies the given index is present on the given pod
+func VerifyIndexFoundOnPod(deployment *Deployment, podName string, indexName string) {
+    gomega.Eventually(func() bool {
+        indexFound := GetIndexOnPod(deployment, podName, indexName)
+        logf.Log.Info("Checking status of index on pod", "PODNAME", podName, "INDEX NAME", indexName, "STATUS", indexFound)
+        return indexFound
+    }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
+}
+
+// VerifyIndexExistsOnS3 verifies the index exists on the remote S3 bucket
+func VerifyIndexExistsOnS3(deployment *Deployment, podName string, indexName string) {
+    gomega.Eventually(func() bool {
+        indexFound := CheckPrefixExistsOnS3(indexName)
+        logf.Log.Info("Checking Index on S3", "INDEX NAME", indexName, "STATUS", indexFound)
+        return indexFound
+    }, deployment.GetTimeout(), PollInterval).Should(gomega.Equal(true))
+}
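
Usage note: to exercise the new smartstore suite locally, the remote-index variables introduced in test/env.sh need to be exported before invoking test/run-tests.sh, along with the AWS credentials consumed by createIndexSecret. A minimal sketch, assuming run-tests.sh maps TEST_FOCUS to the ginkgo focus as its comment describes; the bucket names, region, and license prefix below are placeholders rather than values taken from this change:

    # S3 bucket and region for the remote indexes (SmartStore) test cases
    export INDEXES_S3_BUCKET="my-smartstore-index-bucket"   # placeholder
    export AWS_S3_REGION="us-west-2"                        # placeholder
    # License master test inputs (already required before this change)
    export TEST_S3_BUCKET="my-test-data-bucket"             # placeholder
    export ENTERPRISE_LICENSE_PATH="licenses"               # placeholder prefix
    # Credentials used to build the splunk-s3-index-* secret in the test namespace
    export AWS_ACCESS_KEY_ID="..."
    export AWS_SECRET_ACCESS_KEY="..."
    # Run only the new suite instead of the full CircleCI focus list
    export TEST_FOCUS="smartstore"
    ./test/run-tests.sh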