
Commit

chore: bump sumo ot distro to 0.0.50-beta.0 (#2127)
* chore: bump sumo ot distro to 0.0.50-beta.0

* tests(integration): add daemonset metadata check for logs
pmalek committed Feb 15, 2022
1 parent 0b9b60f commit fb7760f
Showing 9 changed files with 159 additions and 27 deletions.
4 changes: 2 additions & 2 deletions deploy/helm/sumologic/values.yaml
@@ -3426,7 +3426,7 @@ metadata:
## Configure image for Opentelemetry Collector (for logs and metrics)
image:
repository: public.ecr.aws/sumologic/sumologic-otel-collector
- tag: 0.0.48-beta.0
+ tag: 0.0.50-beta.0
pullPolicy: IfNotPresent

securityContext:
@@ -4246,7 +4246,7 @@ otellogs:
## Configure image for Opentelemetry Collector
image:
repository: public.ecr.aws/sumologic/sumologic-otel-collector
- tag: 0.0.48-beta.0
+ tag: 0.0.50-beta.0
pullPolicy: IfNotPresent

logLevel: info
2 changes: 1 addition & 1 deletion tests/helm/logs_otc_daemonset/static/basic.output.yaml
@@ -30,7 +30,7 @@ spec:
containers:
- args:
- --config=/etc/otelcol/config.yaml
- image: public.ecr.aws/sumologic/sumologic-otel-collector:0.0.48-beta.0
+ image: public.ecr.aws/sumologic/sumologic-otel-collector:0.0.50-beta.0
imagePullPolicy: IfNotPresent
name: otelcol
livenessProbe:
Original file line number Diff line number Diff line change
@@ -65,7 +65,7 @@ spec:
priorityClassName: "prio"
containers:
- name: otelcol
- image: public.ecr.aws/sumologic/sumologic-otel-collector:0.0.48-beta.0
+ image: public.ecr.aws/sumologic/sumologic-otel-collector:0.0.50-beta.0
imagePullPolicy: IfNotPresent
args:
- --config=/etc/otel/config.yaml
Original file line number Diff line number Diff line change
@@ -65,7 +65,7 @@ spec:
priorityClassName: "prio"
containers:
- name: otelcol
- image: public.ecr.aws/sumologic/sumologic-otel-collector:0.0.48-beta.0
+ image: public.ecr.aws/sumologic/sumologic-otel-collector:0.0.50-beta.0
imagePullPolicy: IfNotPresent
args:
- --config=/etc/otel/config.yaml
3 changes: 2 additions & 1 deletion tests/integration/helm_default_installation_test.go
@@ -276,7 +276,8 @@ func Test_Helm_Default_FluentD_Metadata(t *testing.T) {
Feature()

featLogs := features.New("logs").
- Setup(stepfuncs.GenerateLogsWithDeployment(
+ Setup(stepfuncs.GenerateLogs(
+ stepfuncs.LogsGeneratorDeployment,
logsGeneratorCount,
internal.LogsGeneratorName,
internal.LogsGeneratorNamespace,
60 changes: 56 additions & 4 deletions tests/integration/helm_otc_metadata_installation_test.go
@@ -279,33 +279,54 @@ func Test_Helm_Default_OT_Metadata(t *testing.T) {

log.V(0).InfoS("sample's labels", "labels", labels)
return labels.MatchAll(expectedLabels)

}, waitDuration, tickDuration)
return ctx
},
).
Feature()

featLogs := features.New("logs").
- Setup(stepfuncs.GenerateLogsWithDeployment(
+ Setup(stepfuncs.GenerateLogs(
+ stepfuncs.LogsGeneratorDeployment,
logsGeneratorCount,
internal.LogsGeneratorName,
internal.LogsGeneratorNamespace,
internal.LogsGeneratorImage,
)).
+ Setup(stepfuncs.GenerateLogs(
+ stepfuncs.LogsGeneratorDaemonSet,
+ logsGeneratorCount,
+ internal.LogsGeneratorName,
+ internal.LogsGeneratorNamespace,
+ internal.LogsGeneratorImage,
+ )).
Assess("logs from log generator present", stepfuncs.WaitUntilExpectedLogsPresent(
Assess("logs from log generator deployment present", stepfuncs.WaitUntilExpectedLogsPresent(
logsGeneratorCount,
map[string]string{
"namespace": internal.LogsGeneratorName,
"pod_labels_app": internal.LogsGeneratorName,
"deployment": internal.LogsGeneratorName,
},
internal.ReceiverMockNamespace,
internal.ReceiverMockServiceName,
internal.ReceiverMockServicePort,
waitDuration,
tickDuration,
)).
Assess("expected container log metadata is present", stepfuncs.WaitUntilExpectedLogsPresent(
Assess("logs from log generator daemonset present", stepfuncs.WaitUntilExpectedLogsPresent(
logsGeneratorCount,
map[string]string{
"namespace": internal.LogsGeneratorName,
"pod_labels_app": internal.LogsGeneratorName,
"daemonset": internal.LogsGeneratorName,
},
internal.ReceiverMockNamespace,
internal.ReceiverMockServiceName,
internal.ReceiverMockServicePort,
waitDuration,
tickDuration,
)).
Assess("expected container log metadata is present for log generator deployment", stepfuncs.WaitUntilExpectedLogsPresent(
logsGeneratorCount,
map[string]string{
"_collector": "kubernetes",
@@ -330,6 +351,30 @@ func Test_Helm_Default_OT_Metadata(t *testing.T) {
waitDuration,
tickDuration,
)).
Assess("expected container log metadata is present for log generator daemonset", stepfuncs.WaitUntilExpectedLogsPresent(
logsGeneratorCount,
map[string]string{
"_collector": "kubernetes",
"namespace": internal.LogsGeneratorName,
"pod_labels_app": internal.LogsGeneratorName,
"container": internal.LogsGeneratorName,
"daemonset": internal.LogsGeneratorName,
"pod": "",
"k8s.pod.id": "",
"k8s.pod.pod_name": "",
"k8s.container.id": "",
"host": "",
"node": "",
"_sourceName": "",
"_sourceCategory": "",
"_sourceHost": "",
},
internal.ReceiverMockNamespace,
internal.ReceiverMockServiceName,
internal.ReceiverMockServicePort,
waitDuration,
tickDuration,
)).
Assess("logs from node systemd present", stepfuncs.WaitUntilExpectedLogsPresent(
10, // we don't really control this, just want to check if the logs show up
map[string]string{
@@ -363,6 +408,13 @@ func Test_Helm_Default_OT_Metadata(t *testing.T) {
terrak8s.RunKubectl(t, &opts, "delete", "deployment", internal.LogsGeneratorName)
return ctx
}).
+ Teardown(
+ func(ctx context.Context, t *testing.T, envConf *envconf.Config) context.Context {
+ opts := *ctxopts.KubectlOptions(ctx)
+ opts.Namespace = internal.LogsGeneratorNamespace
+ terrak8s.RunKubectl(t, &opts, "delete", "daemonset", internal.LogsGeneratorName)
+ return ctx
+ }).
Teardown(stepfuncs.KubectlDeleteNamespaceOpt(internal.LogsGeneratorNamespace)).
Feature()

3 changes: 2 additions & 1 deletion tests/integration/helm_otelcol_logs_test.go
@@ -98,7 +98,8 @@ func Test_Helm_Otelcol_Logs(t *testing.T) {
).
Feature()
featLogs := features.New("logs").
- Setup(stepfuncs.GenerateLogsWithDeployment(
+ Setup(stepfuncs.GenerateLogs(
+ stepfuncs.LogsGeneratorDeployment,
logsGeneratorCount,
internal.LogsGeneratorName,
internal.LogsGeneratorNamespace,
46 changes: 46 additions & 0 deletions tests/integration/internal/logsgenerator/logsgenerator.go
@@ -110,6 +110,52 @@ func GetLogsGeneratorDeployment(
}
}

+ func GetLogsGeneratorDaemonSet(
+ namespace string,
+ name string,
+ image string,
+ options LogsGeneratorOptions,
+ ) appsv1.DaemonSet {
+ appLabels := map[string]string{
+ "app": name,
+ }
+ metadata := metav1.ObjectMeta{
+ Name: name,
+ Namespace: namespace,
+ Labels: appLabels,
+ }
+
+ // There's no way to tell the log generator to keep running after it's done generating logs.
+ // This is annoying if we want to run it in a Deployment and not have it be restarted after exiting
+ // so we sleep after it exits.
+ generatorArgs := optionsToArgumentList(options)
+ logsGeneratorCommand := fmt.Sprintf("logs-generator %s", strings.Join(generatorArgs, " "))
+ logsGeneratorAndSleepCommand := fmt.Sprintf("%s; sleep %f", logsGeneratorCommand, deploymentSleepTime.Seconds())
+
+ podTemplateSpec := corev1.PodTemplateSpec{
+ ObjectMeta: metadata,
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: name,
+ Image: image,
+ Command: []string{"/bin/bash", "-c", "--"},
+ Args: []string{logsGeneratorAndSleepCommand},
+ },
+ },
+ },
+ }
+ return appsv1.DaemonSet{
+ ObjectMeta: metadata,
+ Spec: appsv1.DaemonSetSpec{
+ Template: podTemplateSpec,
+ Selector: &metav1.LabelSelector{
+ MatchLabels: appLabels,
+ },
+ },
+ }
+ }

func optionsToArgumentList(options LogsGeneratorOptions) []string {
// Note: this could be made cleaner with reflection and struct field tags, but we don't
// really need the complexity, and this logic is unlikely to change a lot
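The new GetLogsGeneratorDaemonSet helper mirrors the existing Deployment variant: the same single-container pod template, with the generator command followed by a sleep so the pod keeps running after the logs are emitted. Below is a minimal usage sketch, not part of the commit — the helper name, package clause, log count, and the import path of the internal package are assumptions, while the constants and client calls mirror the ones used elsewhere in this commit:

```go
package integration

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"sigs.k8s.io/e2e-framework/pkg/envconf"

	"github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal"
	"github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/logsgenerator"
)

// createLogsGeneratorDaemonSet is a hypothetical helper: it builds the
// DaemonSet returned by GetLogsGeneratorDaemonSet and creates it with the
// e2e-framework client, much like stepfuncs.GenerateLogs does below.
func createLogsGeneratorDaemonSet(ctx context.Context, t *testing.T, envConf *envconf.Config) {
	options := *logsgenerator.NewDefaultGeneratorOptions()
	options.TotalLogs = 1000 // hypothetical log count

	daemonset := logsgenerator.GetLogsGeneratorDaemonSet(
		internal.LogsGeneratorNamespace,
		internal.LogsGeneratorName,
		internal.LogsGeneratorImage,
		options,
	)

	// Create the DaemonSet in the log generator namespace (assumes the
	// namespace already exists).
	err := envConf.Client().Resources(internal.LogsGeneratorNamespace).Create(ctx, &daemonset)
	require.NoError(t, err)
}
```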
64 changes: 48 additions & 16 deletions tests/integration/internal/stepfuncs/logs.go
@@ -14,8 +14,17 @@ import (
"github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/logsgenerator"
)

- // Generate logsCount logs using a Deployment
- func GenerateLogsWithDeployment(
+ type logsGeneratorImplType uint
+
+ const (
+ LogsGeneratorDeployment = iota
+ LogsGeneratorDaemonSet
+ )
+
+ // Generate logsCount logs using the designated implementation type:
+ // either deployment or a daemonset.
+ func GenerateLogs(
+ implType logsGeneratorImplType,
logsCount uint,
logsGeneratorName string,
logsGeneratorNamespace string,
@@ -25,20 +34,43 @@ func GenerateLogsWithDeployment(
client := envConf.Client()
generatorOptions := *logsgenerator.NewDefaultGeneratorOptions()
generatorOptions.TotalLogs = logsCount
- deployment := logsgenerator.GetLogsGeneratorDeployment(
- logsGeneratorNamespace,
- logsGeneratorName,
- logsGeneratorImage,
- generatorOptions,
- )
-
- // create the namespace
- namespace := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: logsGeneratorNamespace}}
- require.NoError(t, client.Resources().Create(ctx, &namespace))
-
- // create the deployment
- err := client.Resources(logsGeneratorNamespace).Create(ctx, &deployment)
- require.NoError(t, err)

+ var namespace corev1.Namespace
+ err := client.Resources().Get(ctx, logsGeneratorNamespace, "", &namespace)
+ if err != nil {
+ // create the namespace
+ namespace := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: logsGeneratorNamespace}}
+ require.NoError(t, client.Resources().Create(ctx, &namespace))
+ }
+
+ switch implType {
+ case LogsGeneratorDeployment:
+ deployment := logsgenerator.GetLogsGeneratorDeployment(
+ logsGeneratorNamespace,
+ logsGeneratorName,
+ logsGeneratorImage,
+ generatorOptions,
+ )
+
+ // create the deployment
+ err := client.Resources(logsGeneratorNamespace).Create(ctx, &deployment)
+ require.NoError(t, err)
+
+ case LogsGeneratorDaemonSet:
+ daemonset := logsgenerator.GetLogsGeneratorDaemonSet(
+ logsGeneratorNamespace,
+ logsGeneratorName,
+ logsGeneratorImage,
+ generatorOptions,
+ )
+
+ // create the daemonset
+ err := client.Resources(logsGeneratorNamespace).Create(ctx, &daemonset)
+ require.NoError(t, err)
+
+ default:
+ t.Fatalf("Unknown log generator deployment model: %v", implType)
+ }

return ctx
}
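With this change, call sites pick the workload kind through the new first argument of GenerateLogs instead of a dedicated per-workload function. A minimal sketch of that wiring, mirroring the Setup chains added to the tests above — the wrapper function, package clause, and internal import paths are assumptions, and in the real suite the built feature is handed to the shared test environment rather than discarded:

```go
package integration

import (
	"sigs.k8s.io/e2e-framework/pkg/features"

	"github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal"
	"github.com/SumoLogic/sumologic-kubernetes-collection/tests/integration/internal/stepfuncs"
)

// buildLogsFeature is a hypothetical helper; it only illustrates the new
// GenerateLogs signature, where the first argument selects the workload kind.
func buildLogsFeature(logsGeneratorCount uint) {
	featLogs := features.New("logs").
		// One log generator running as a Deployment...
		Setup(stepfuncs.GenerateLogs(
			stepfuncs.LogsGeneratorDeployment,
			logsGeneratorCount,
			internal.LogsGeneratorName,
			internal.LogsGeneratorNamespace,
			internal.LogsGeneratorImage,
		)).
		// ...and one running as a DaemonSet, so the "deployment" and
		// "daemonset" metadata fields can both be asserted on the receiver side.
		Setup(stepfuncs.GenerateLogs(
			stepfuncs.LogsGeneratorDaemonSet,
			logsGeneratorCount,
			internal.LogsGeneratorName,
			internal.LogsGeneratorNamespace,
			internal.LogsGeneratorImage,
		)).
		Feature()

	// In the real tests the feature is passed to the suite's shared test
	// environment; here it is only built.
	_ = featLogs
}
```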
