CSPL-540: Added utilities and test for s3 indexes #240
@@ -0,0 +1,47 @@
package smartstore

import (
    "testing"
    "time"

    . "github.com/onsi/ginkgo"
    "github.com/onsi/ginkgo/reporters"
    . "github.com/onsi/gomega"

    "github.com/splunk/splunk-operator/test/testenv"
)

const (
    // PollInterval specifies the polling interval
    PollInterval = 5 * time.Second

    // ConsistentPollInterval is the interval to use to consistently check that a state is stable
    ConsistentPollInterval = 200 * time.Millisecond
    ConsistentDuration     = 2000 * time.Millisecond
)

var (
    testenvInstance *testenv.TestEnv
    testSuiteName   = "smartstore-" + testenv.RandomDNSName(2)
)

// TestBasic is the main entry point
func TestBasic(t *testing.T) {
    RegisterFailHandler(Fail)

    junitReporter := reporters.NewJUnitReporter(testSuiteName + "_junit.xml")
    RunSpecsWithDefaultAndCustomReporters(t, "Running "+testSuiteName, []Reporter{junitReporter})
}

var _ = BeforeSuite(func() {
    var err error
    testenvInstance, err = testenv.NewDefaultTestEnv(testSuiteName)
    Expect(err).ToNot(HaveOccurred())
})

var _ = AfterSuite(func() {
    if testenvInstance != nil {
        Expect(testenvInstance.Teardown()).ToNot(HaveOccurred())
    }
})
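The three polling constants above are presumably meant for Gomega's asynchronous assertions. A minimal sketch of that assumed usage pattern (illustration only, not part of this PR; the ready closure is a stand-in condition):

var _ = Describe("Polling illustration (sketch, not part of this PR)", func() {
    It("waits for a condition and then checks it stays stable", func() {
        // Stand-in condition; a real spec would query the deployment.
        ready := func() bool { return true }

        // Poll every PollInterval until the condition holds or the timeout expires.
        Eventually(ready, 2*time.Minute, PollInterval).Should(BeTrue())

        // Re-check every ConsistentPollInterval for ConsistentDuration to
        // assert that the state stays stable once reached.
        Consistently(ready, ConsistentDuration, ConsistentPollInterval).Should(BeTrue())
    })
})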
@@ -0,0 +1,67 @@
package smartstore

import (
    "fmt"
    "os/exec"
    "strings"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "github.com/splunk/splunk-operator/test/testenv"
)

func dumpGetPods(ns string) {
    output, _ := exec.Command("kubectl", "get", "pod", "-n", ns).Output()
    for _, line := range strings.Split(string(output), "\n") {
        testenvInstance.Log.Info(line)
    }
}

var _ = Describe("Smoke test", func() {

    var deployment *testenv.Deployment

    BeforeEach(func() {
        var err error
        deployment, err = testenvInstance.NewDeployment(testenv.RandomDNSName(3))
        Expect(err).To(Succeed(), "Unable to create deployment")
    })

    AfterEach(func() {
        // When a test spec fails, skip the teardown so we can troubleshoot.
        if CurrentGinkgoTestDescription().Failed {
            testenvInstance.SkipTeardown = true
        }
        if deployment != nil {
            deployment.Teardown()
        }
    })

    Context("Configure indexes on standalone deployment using CR Spec", func() {
        It("smartstore: Can configure indexes through app", func() {
            volumeName := "test-volume-" + testenv.RandomDNSName(3)
            indexName := "test-index-" + testenv.RandomDNSName(3)
            testenvInstance.Log.Info("index secret name", "secret name", testenvInstance.GetIndexSecretName())
            standalone, err := deployment.DeployStandaloneWithIndexes(deployment.GetName(), testenvInstance.GetIndexSecretName(), deployment.GetName(), volumeName, indexName)
            Expect(err).To(Succeed(), "Unable to deploy standalone instance")

            // Verify the standalone goes to the ready state
            testenv.StandaloneReady(deployment, deployment.GetName(), standalone, testenvInstance)

            // Check the index on the pod
            podName := fmt.Sprintf(testenv.StandalonePod, deployment.GetName(), 0)
            testenv.VerifyIndexFoundOnPod(deployment, podName, indexName)

            // Ingest data into the index
            logFile := "/opt/splunk/var/log/splunk/splunkd.log"
            testenv.IngestFileViaMonitor(logFile, indexName, podName, deployment)

            // Roll hot buckets on the test index by restarting splunk
            testenv.RollHotToWarm(deployment, podName, indexName)

            // Check for the index on S3
            testenv.VerifyIndexExistsOnS3(deployment, podName, indexName)
        })
    })
})
@@ -19,8 +19,31 @@ import (
 	"fmt"

+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"strings"
 )

+// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master
+// Endpoint: /services/cluster/master/sites
+type ClusterMasterSitesResponse struct {
+	Entries []ClusterMasterSitesEntry `json:"entry"`
+}
+
+// ClusterMasterSitesEntry represents a site of an indexer cluster with its metadata
+type ClusterMasterSitesEntry struct {
+	Name    string                    `json:"name"`
+	Content ClusterMasterSitesContent `json:"content"`
+}
+
+// ClusterMasterSitesContent represents detailed information about a site
+type ClusterMasterSitesContent struct {
+	Peers map[string]ClusterMasterSitesPeer `json:"peers"`
+}
+
+// ClusterMasterSitesPeer represents an indexer peer member of a site
+type ClusterMasterSitesPeer struct {
+	ServerName string `json:"server_name"`
+}
+
 // ClusterMasterHealthResponse is a representation of the health response by a Splunk cluster-master
 // Endpoint: /services/cluster/master/health
 type ClusterMasterHealthResponse struct {
@@ -122,24 +145,52 @@ func CheckSearchHeadRemoved(deployment *Deployment) bool {
 	return searchHeadRemoved
 }

-// ClusterMasterSitesResponse is a representation of the sites managed by a Splunk cluster-master
-// Endpoint: /services/cluster/master/sites
-type ClusterMasterSitesResponse struct {
-	Entries []ClusterMasterSitesEntry `json:"entry"`
-}
-
-// ClusterMasterSitesEntry represents a site of an indexer cluster with its metadata
-type ClusterMasterSitesEntry struct {
-	Name    string                    `json:"name"`
-	Content ClusterMasterSitesContent `json:"content"`
-}
-
-// ClusterMasterSitesContent represents detailed information about a site
-type ClusterMasterSitesContent struct {
-	Peers map[string]ClusterMasterSitesPeer `json:"peers"`
-}
-
-// ClusterMasterSitesPeer reprensents an indexer peer member of a site
-type ClusterMasterSitesPeer struct {
-	ServerName string `json:"server_name"`
-}
+// RollHotBuckets rolls hot buckets in the cluster
+func RollHotBuckets(deployment *Deployment) bool {
+	podName := fmt.Sprintf("splunk-%s-cluster-master-0", deployment.GetName())
+	stdin := "/opt/splunk/bin/splunk rolling-restart cluster-peers -auth admin:$(cat /mnt/splunk-secrets/password)"
+	command := []string{"/bin/sh"}
+	stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+	if err != nil {
+		logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+		return false
+	}
+	logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+	if strings.Contains(stdout, "Rolling restart of all cluster peers has been initiated.") {
+		return true
+	}
+	return false
+}
+
+// RollingRestartEndpointResponse is a representation of the /services/cluster/master/info endpoint
+type RollingRestartEndpointResponse struct {
+	Entry []struct {
+		Content struct {
+			RollingRestartFlag bool `json:"rolling_restart_flag"`
+		} `json:"content"`
+	} `json:"entry"`
+}
+
+// CheckRollingRestartStatus checks whether a rolling restart is in progress in the cluster
+func CheckRollingRestartStatus(deployment *Deployment) bool {
+	podName := fmt.Sprintf("splunk-%s-cluster-master-0", deployment.GetName())
+	stdin := "curl -ks -u admin:$(cat /mnt/splunk-secrets/password) https://localhost:8089/services/cluster/master/info?output_mode=json"
+	command := []string{"/bin/sh"}
+	stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
+	if err != nil {
+		logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
+		return false
+	}
+	logf.Log.Info("Command executed on pod", "pod", podName, "command", command, "stdin", stdin, "stdout", stdout, "stderr", stderr)
+	restResponse := RollingRestartEndpointResponse{}
+	err = json.Unmarshal([]byte(stdout), &restResponse)
+	if err != nil {
+		logf.Log.Error(err, "Failed to parse the cluster master info response")
+		return false
+	}
+	rollingRestart := true
+	for _, entry := range restResponse.Entry {
+		rollingRestart = entry.Content.RollingRestartFlag
+	}
+	return rollingRestart
+}

Review comment on the rolling-restart command in RollHotBuckets: "Could we just use the roll-hot-buckets command instead of rolling-restart?"

Author reply: "I will do that as part of the next PR. I just added this util, but it's not being used yet. I am using roll-hot-buckets on standalone, which is part of this PR, and will follow a similar pattern for the indexer cluster."
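Following the reviewer's suggestion, a per-index variant could target Splunk's documented roll-hot-buckets REST action instead of restarting all cluster peers. A hedged sketch modeled on the utilities above (RollHotBucketsForIndex is hypothetical and not part of this PR; it assumes the same PodExecCommand helper and secret mount):

// RollHotBucketsForIndex is a hypothetical helper that rolls hot buckets for a
// single index via the roll-hot-buckets REST action, avoiding a rolling
// restart of all cluster peers. Sketch only; not part of this PR.
func RollHotBucketsForIndex(deployment *Deployment, podName string, indexName string) bool {
    stdin := fmt.Sprintf("curl -ks -u admin:$(cat /mnt/splunk-secrets/password) -X POST https://localhost:8089/services/data/indexes/%s/roll-hot-buckets", indexName)
    command := []string{"/bin/sh"}
    stdout, stderr, err := deployment.PodExecCommand(podName, command, stdin, false)
    if err != nil {
        logf.Log.Error(err, "Failed to execute command on pod", "pod", podName, "command", command)
        return false
    }
    logf.Log.Info("Command executed on pod", "pod", podName, "stdin", stdin, "stdout", stdout, "stderr", stderr)
    return true
}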
Review comment: "Nitpick: Typos"