New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Implement Stackdriver Logging e2e tests using PubSub #45255
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -38,13 +38,17 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Feature:Stackdr | |
gclLogsProvider, err := newGclLogsProvider(f) | ||
framework.ExpectNoError(err, "Failed to create GCL logs provider") | ||
|
||
err = gclLogsProvider.Init() | ||
defer gclLogsProvider.Cleanup() | ||
framework.ExpectNoError(err, "Failed to init GCL logs provider") | ||
|
||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items | ||
nodeCount := len(nodes) | ||
podCount := 30 * nodeCount | ||
loggingDuration := 10 * time.Minute | ||
linesPerSecond := 1000 * nodeCount | ||
linesPerPod := linesPerSecond * int(loggingDuration.Seconds()) / podCount | ||
ingestionTimeout := 60 * time.Minute | ||
ingestionTimeout := 20 * time.Minute | ||
|
||
By("Running logs generator pods") | ||
pods := []*loggingPod{} | ||
|
@@ -56,9 +60,6 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Feature:Stackdr | |
defer f.PodClient().Delete(podName, &meta_v1.DeleteOptions{}) | ||
} | ||
|
||
By("Waiting for pods to succeed") | ||
time.Sleep(loggingDuration) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I really like this :D |
||
|
||
By("Waiting for all log lines to be ingested") | ||
config := &loggingTestConfig{ | ||
LogsProvider: gclLogsProvider, | ||
|
@@ -79,12 +80,16 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Feature:Stackdr | |
gclLogsProvider, err := newGclLogsProvider(f) | ||
framework.ExpectNoError(err, "Failed to create GCL logs provider") | ||
|
||
err = gclLogsProvider.Init() | ||
defer gclLogsProvider.Cleanup() | ||
framework.ExpectNoError(err, "Failed to init GCL logs provider") | ||
|
||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items | ||
maxPodCount := 10 | ||
jobDuration := 1 * time.Minute | ||
linesPerPodPerSecond := 100 | ||
testDuration := 10 * time.Minute | ||
ingestionTimeout := 60 * time.Minute | ||
ingestionTimeout := 20 * time.Minute | ||
|
||
podRunDelay := time.Duration(int64(jobDuration) / int64(maxPodCount)) | ||
podRunCount := int(testDuration.Seconds())/int(podRunDelay.Seconds()) - 1 | ||
|
@@ -102,9 +107,6 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL [Feature:Stackdr | |
time.Sleep(podRunDelay) | ||
} | ||
|
||
By("Waiting for the last pods to finish") | ||
time.Sleep(jobDuration) | ||
|
||
By("Waiting for all log lines to be ingested") | ||
config := &loggingTestConfig{ | ||
LogsProvider: gclLogsProvider, | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is this enough?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Should be, since we start ingesting log entries as soon as they start flowing. From the manual runs that's more than enough, but I don't know of any guarantees/SLOs here (docs say log entries should appear "right away"). I suggest leaving this as it is and then applying a fix if that's not enough, though I think timing out would indicate a problem with fluentd not being performant enough, rather than the test being too restrictive on time.