Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .circleci/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,7 @@ jobs:
mkdir -p /tmp/test-results
find ./test -name "*junit.xml" -exec cp {} /tmp/test-results \;
environment:
TEST_FOCUS: "smoke|ingest_search|monitoring_console|deletecr"
TEST_FOCUS: "smoke|ingest_search|monitoring_console|deletecr|smartstore"
- store_test_results:
name: Save test results
path: /tmp/test-results
Expand Down
4 changes: 4 additions & 0 deletions test/env.sh
Original file line number Diff line number Diff line change
Expand Up @@ -10,8 +10,12 @@
: "${ECR_REGISTRY:=}"
: "${VPC_PUBLIC_SUBNET_STRING:=}"
: "${VPC_PRIVATE_SUBNET_STRING:=}"
# Below env variables required to run license master test cases
: "${ENTERPRISE_LICENSE_PATH:=}"
: "${TEST_S3_BUCKET:=}"
# Below env variables required to run remote indexes test cases
: "${INDEXES_S3_BUCKET:=}"
: "${AWS_S3_REGION:=}"

# Docker registry to use to push the test images to and pull from in the cluster
if [ -z "${PRIVATE_REGISTRY}" ]; then
Expand Down
15 changes: 14 additions & 1 deletion test/run-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -91,10 +91,23 @@ if [[ -z "${ENTERPRISE_LICENSE_LOCATION}" ]]; then
echo "License path not set. Changing to default"
export ENTERPRISE_LICENSE_LOCATION="${ENTERPRISE_LICENSE_PATH}"
fi

# Set S3 env variables
if [[ -z "${TEST_BUCKET}" ]]; then
echo "Test bucket not set. Changing to default"
echo "Data bucket not set. Changing to default"
export TEST_BUCKET="${TEST_S3_BUCKET}"
fi

if [[ -z "${TEST_INDEXES_S3_BUCKET}" ]]; then
echo "Test bucket not set. Changing to default"
export TEST_INDEXES_S3_BUCKET="${INDEXES_S3_BUCKET}"
fi

if [[ -z "${S3_REGION}" ]]; then
echo "S3 Region not set. Changing to default"
export S3_REGION="${AWS_S3_REGION}"
fi


# Running only smoke test cases by default or value passed through TEST_FOCUS env variable. To run different test packages add/remove path from focus argument or TEST_FOCUS variable
ginkgo -v -progress -r -stream -nodes=${NUM_NODES} --focus="${TEST_TO_RUN}" ${topdir}/test -- -commit-hash=${COMMIT_HASH} -operator-image=${PRIVATE_SPLUNK_OPERATOR_IMAGE} -splunk-image=${PRIVATE_SPLUNK_ENTERPRISE_IMAGE}
47 changes: 47 additions & 0 deletions test/smartstore/smartstore_suite_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
package smartstore

import (
"testing"
"time"

. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"

"github.com/splunk/splunk-operator/test/testenv"
)

const (
	// PollInterval specifies the polling interval
	PollInterval = 5 * time.Second

	// ConsistentPollInterval is the interval to use to consistently check a state is stable
	ConsistentPollInterval = 200 * time.Millisecond
	// ConsistentDuration is the window over which a state must stay stable
	// when checked at ConsistentPollInterval.
	ConsistentDuration = 2000 * time.Millisecond
)

var (
	// testenvInstance is the shared test environment, created in BeforeSuite.
	testenvInstance *testenv.TestEnv
	// Suite name prefix; fixed typo "smartore-" -> "smartstore-" so the
	// generated JUnit file and suite description read correctly.
	testSuiteName = "smartstore-" + testenv.RandomDNSName(2)
)

// TestBasic is the main entry point: it wires Gomega's failure handler into
// Ginkgo and runs the suite with an additional JUnit reporter so CI can
// collect per-suite test results.
func TestBasic(t *testing.T) {
	RegisterFailHandler(Fail)
	reporter := reporters.NewJUnitReporter(testSuiteName + "_junit.xml")
	RunSpecsWithDefaultAndCustomReporters(t, "Running "+testSuiteName, []Reporter{reporter})
}

// Create the shared test environment once before any spec runs. The
// assignment happens before the error check so a partially-created
// environment is still visible to AfterSuite for teardown.
var _ = BeforeSuite(func() {
	var err error
	testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName)
	Expect(err).ToNot(HaveOccurred())
})

// Tear down the shared test environment after all specs finish; skipped
// when BeforeSuite never got far enough to create one.
var _ = AfterSuite(func() {
	if testenvInstance != nil {
		Expect(testenvInstance.Teardown()).ToNot(HaveOccurred())
	}
})
67 changes: 67 additions & 0 deletions test/smartstore/smartstore_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
package smartstore

import (
"fmt"
"os/exec"
"strings"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"github.com/splunk/splunk-operator/test/testenv"
)

// dumpGetPods logs the output of `kubectl get pod -n <ns>` line by line to
// aid debugging. Best-effort: a kubectl failure is logged but does not abort
// the caller (the original silently discarded the error).
func dumpGetPods(ns string) {
	output, err := exec.Command("kubectl", "get", "pod", "-n", ns).Output()
	if err != nil {
		testenvInstance.Log.Info("Failed to run kubectl get pod", "namespace", ns, "error", err.Error())
	}
	for _, line := range strings.Split(string(output), "\n") {
		testenvInstance.Log.Info(line)
	}
}

var _ = Describe("Smoke test", func() {

var deployment *testenv.Deployment

BeforeEach(func() {
var err error
deployment, err = testenvInstance.NewDeployment(testenv.RandomDNSName(3))
Expect(err).To(Succeed(), "Unable to create deployment")
})

AfterEach(func() {
// When a test spec failed, skip the teardown so we can troubleshoot.
if CurrentGinkgoTestDescription().Failed {
testenvInstance.SkipTeardown = true
}
if deployment != nil {
deployment.Teardown()
}
})

Context("Confiugre indexes on standlaone deployment using CR Spec", func() {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nitpick: Typos

It("smartstore: Can configure indexes through app", func() {
volumeName := "test-volume-" + testenv.RandomDNSName(3)
indexName := "test-index-" + testenv.RandomDNSName(3)
testenvInstance.Log.Info("index secret name ", "secret name ", testenvInstance.GetIndexSecretName())
standalone, err := deployment.DeployStandaloneWithIndexes(deployment.GetName(), testenvInstance.GetIndexSecretName(), deployment.GetName(), volumeName, indexName)
Expect(err).To(Succeed(), "Unable to deploy standalone instance ")

// Verify standalone goes to ready state
testenv.StandaloneReady(deployment, deployment.GetName(), standalone, testenvInstance)

// Check index on pod
podName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)
testenv.VerifyIndexFoundOnPod(deployment, podName, indexName)

// Ingest data to the index
logFile := "/opt/splunk/var/log/splunk/splunkd.log"
testenv.IngestFileViaMonitor(logFile, indexName, podName, deployment)

// Roll Hot Buckets on the test index by restarting splunk
testenv.RollHotToWarm(deployment, podName, indexName)

// Check for index on S3
testenv.VerifyIndexExistsOnS3(deployment, podName, indexName)
})
})
})
83 changes: 67 additions & 16 deletions test/testenv/cmutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,31 @@ import (
"fmt"

logf "sigs.k8s.io/controller-runtime/pkg/log"
"strings"
)

// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master
// Endpoint: /services/cluster/master/sites
type ClusterMasterSitesResponse struct {
	Entries []ClusterMasterSitesEntry `json:"entry"`
}

// ClusterMasterSitesEntry represents a site of an indexer cluster with its metadata
type ClusterMasterSitesEntry struct {
	Name    string                    `json:"name"`
	Content ClusterMasterSitesContent `json:"content"`
}

// ClusterMasterSitesContent represents detailed information about a site
type ClusterMasterSitesContent struct {
	Peers map[string]ClusterMasterSitesPeer `json:"peers"`
}

// ClusterMasterSitesPeer represents an indexer peer member of a site
type ClusterMasterSitesPeer struct {
	ServerName string `json:"server_name"`
}

// ClusterMasterHealthResponse is a representation of the health response by a Splunk cluster-master
// Endpoint: /services/cluster/master/health
type ClusterMasterHealthResponse struct {
Expand Down Expand Up @@ -122,24 +145,52 @@ func CheckSearchHeadRemoved(deployment *Deployment) bool {
return searchHeadRemoved
}

// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master
// Endpoint: /services/cluster/master/sites
type ClusterMasterSitesResponse struct {
Entries []ClusterMasterSitesEntry `json:"entry"`
}

// ClusterMasterSitesEntry represents a site of an indexer cluster with its metadata
type ClusterMasterSitesEntry struct {
Name string `json:"name"`
Content ClusterMasterSitesContent `json:"content"`
// RollHotBuckets roll hot buckets in cluster
func RollHotBuckets(deployment *Deployment) bool {
podName := fmt.Sprintf("splunk-%s-cluster-master-0", deployment.GetName())
stdin := "/opt/splunk/bin/splunk rolling-restart cluster-peers -auth admin:$(cat /mnt/splunk-secrets/password)"
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could we just use the roll-hot-buckets command instead of rolling-restart?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I will do that as part of next PR. I just added this util but its not being used. I am using roll-hot-buckets on standalone which is part of this PR and will follow similar pattern for Indexer cluster.

command := []string{"/bin/sh"}
stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
if err != nil {
logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
return false
}
logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
if strings.Contains(stdout, "Rolling restart of all cluster peers has been initiated.") {
return true
}
return false
}

// ClusterMasterSitesContent represents detailed information about a site
type ClusterMasterSitesContent struct {
Peers map[string]ClusterMasterSitesPeer `json:"peers"`
// RollingRestartEndpointResponse is represtentation of /services/cluster/master/info endpiont
type RollingRestartEndpointResponse struct {
Entry []struct {
Content struct {
RollingRestartFlag bool `json:"rolling_restart_flag"`
} `json:"content"`
} `json:"entry"`
}

// ClusterMasterSitesPeer reprensents an indexer peer member of a site
type ClusterMasterSitesPeer struct {
ServerName string `json:"server_name"`
// CheckRollingRestartStatus checks if rolling restart is happening in cluster
func CheckRollingRestartStatus(deployment *Deployment) bool {
podName := fmt.Sprintf("splunk-%s-cluster-master-0", deployment.GetName())
stdin := "curl -ks -u admin:$(cat /mnt/splunk-secrets/password) https://localhost:8089/services/cluster/master/info?output_mode=json"
command := []string{"/bin/sh"}
stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
if err != nil {
logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
return false
}
logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
restResponse := RollingRestartEndpointResponse{}
err = json.Unmarshal([]byte(stdout), &restResponse)
if err != nil {
logf.Log.Error(err, "Failed to parse cluster searchheads")
return false
}
rollingRestart := true
for _, entry := range restResponse.Entry {
rollingRestart = entry.Content.RollingRestartFlag
}
return rollingRestart
}
31 changes: 31 additions & 0 deletions test/testenv/deployment.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/config"

enterprisev1 "github.com/splunk/splunk-operator/pkg/apis/enterprise/v1beta1"
splcommon "github.com/splunk/splunk-operator/pkg/splunk/common"
)

// Deployment simply represents the deployment (standalone, clustered,...etc) we create on the testenv
Expand Down Expand Up @@ -427,3 +428,33 @@ func (d *Deployment) DeployStandalonewithGivenSpec(name string, spec enterprisev
}
return deployed.(*enterprisev1.Standalone), err
}

// DeployStandaloneWithIndexes deploys a standalone splunk enterprise instance
// on the specified testenv, configured with a SmartStore remote-index volume
// (backed by the test S3 bucket) and a remote index.
//
// BUG FIX: the original parameter list contained a syntax error
// ("volumeName, string,"). Callers pass five arguments (the third being the
// deployment name); s3TestDir fills that slot and is currently unused —
// NOTE(review): confirm its intended purpose.
func (d *Deployment) DeployStandaloneWithIndexes(name string, indexesSecret string, s3TestDir string, volumeName string, indexName string) (*enterprisev1.Standalone, error) {

	// Build the SmartStore volume/index specs against the test S3 bucket
	// (s3Region and testIndexesS3Bucket come from the testenv package config).
	s3Endpoint := "https://s3-" + s3Region + ".amazonaws.com"
	volumeSpec := GenerateIndexVolumeSpec(volumeName, s3Endpoint, testIndexesS3Bucket, indexesSecret)
	indexSpec := GenerateIndexSpec(indexName, volumeName)
	spec := enterprisev1.StandaloneSpec{
		CommonSplunkSpec: enterprisev1.CommonSplunkSpec{
			Spec: splcommon.Spec{
				ImagePullPolicy: "IfNotPresent",
			},
			Volumes: []corev1.Volume{},
		},
		SmartStore: enterprisev1.SmartStoreSpec{
			VolList: []enterprisev1.VolumeSpec{
				volumeSpec,
			},
			IndexList: []enterprisev1.IndexSpec{
				indexSpec,
			},
		},
	}
	standalone := newStandaloneWithSpec(name, d.testenv.namespace, spec)
	deployed, err := d.deployCR(name, standalone)
	if err != nil {
		return nil, err
	}
	return deployed.(*enterprisev1.Standalone), nil
}
30 changes: 30 additions & 0 deletions test/testenv/ingest_utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,3 +222,33 @@ func CopyFileToPod(podName string, srcPath string, destPath string, deployment *

//go:linkname cpMakeTar k8s.io/kubernetes/pkg/kubectl/cmd/cp.makeTar
func cpMakeTar(srcPath, destPath string, writer io.Writer) error

// IngestFileViaMonitor ingests a file into an instance by running the Splunk
// `add monitor` CLI on the given pod, targeting the given index. Returns an
// error when the command fails or the CLI response does not confirm the
// monitor was added.
func IngestFileViaMonitor(logFile string, indexName string, podName string, deployment *Deployment) error {

	// Monitor log into specified index
	var addMonitorCmd strings.Builder
	splunkBin := "/opt/splunk/bin/splunk"
	username := "admin"
	password := "$(cat /mnt/splunk-secrets/password)"
	splunkCmd := "add monitor"

	fmt.Fprintf(&addMonitorCmd, "%s %s %s -index %s -auth %s:%s", splunkBin, splunkCmd, logFile, indexName, username, password)
	command := []string{"/bin/bash"}
	stdin := addMonitorCmd.String()
	addMonitorResp, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
	if err != nil {
		logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "stdin", stdin, "addMonitorResp", addMonitorResp, "stderr", stderr)
		return err
	}

	// Validate the expected CLI response.
	// BUG FIX: the original treated a MATCHING response as failure
	// (Compare == 0) and then returned `err`, which is nil at this point.
	// Fail only when the confirmation text is absent, and return a real error.
	expectedResp := fmt.Sprintf("Added monitor of '%s'", logFile)
	if !strings.Contains(addMonitorResp, expectedResp) {
		logf.Log.Error(nil, "Failed response to add monitor to splunk", "pod", podName, "addMonitorResp", addMonitorResp, "stderr", stderr)
		return fmt.Errorf("unexpected response while adding monitor for %q: %s", logFile, addMonitorResp)
	}
	logf.Log.Info("File Ingested via add monitor Successfully", "logFile", logFile, "addMonitorResp", addMonitorResp)
	return nil
}
34 changes: 0 additions & 34 deletions test/testenv/lmutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,8 @@ package testenv

import (
"encoding/json"
"fmt"
"os"
"strings"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"

logf "sigs.k8s.io/controller-runtime/pkg/log"
)

Expand Down Expand Up @@ -62,30 +55,3 @@ func CheckLicenseMasterConfigured(deployment *Deployment, podName string) bool {
logf.Log.Info("License Master configuration on POD", "POD", podName, "License Master", licenseMaster)
return strings.Contains(licenseMaster, "license-master-service:8089")
}

// DownloadFromS3Bucket downloads the enterprise license file from the S3
// bucket named by TEST_BUCKET, under the ENTERPRISE_LICENSE_LOCATION prefix,
// into a local file named "enterprise.lic". Returns the local file name and
// any download error.
func DownloadFromS3Bucket() (string, error) {
	dataBucket := os.Getenv("TEST_BUCKET")
	location := os.Getenv("ENTERPRISE_LICENSE_LOCATION")
	fmt.Printf("%s : dataBucket %s : location\n", os.Getenv("TEST_BUCKET"), os.Getenv("ENTERPRISE_LICENSE_LOCATION"))
	item := "enterprise.lic"
	file, err := os.Create(item)
	if err != nil {
		// BUG FIX: the original logged this error but continued with an
		// unusable file handle; bail out instead.
		logf.Log.Error(err, "Failed to create license file")
		return "", err
	}
	defer file.Close()

	// BUG FIX: the original discarded the session-creation error.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
	if err != nil {
		logf.Log.Error(err, "Failed to create AWS session")
		return "", err
	}
	downloader := s3manager.NewDownloader(sess)
	numBytes, err := downloader.Download(file,
		&s3.GetObjectInput{
			Bucket: aws.String(dataBucket),
			Key:    aws.String(location + "/" + "enterprise.lic"),
		})
	if err != nil {
		logf.Log.Error(err, "Failed to download license file")
	}

	logf.Log.Info("Downloaded", "filename", file.Name(), "bytes", numBytes)
	return file.Name(), err
}
Loading