STAC-21039: Various fixes for unit tests
craffit committed May 3, 2024
1 parent 03734c5 commit 69f5b17
Showing 21 changed files with 88 additions and 80 deletions.
32 changes: 16 additions & 16 deletions pkg/collector/corechecks/cluster/ksm/kubernetes_state_test.go
@@ -1346,7 +1346,7 @@ func TestKSMCheck_processLabelsAsTags(t *testing.T) {
name: "Initially empty",
config: &KSMConfig{
labelJoins: map[string]*joinsConfig{},
LabelsMapper: map[string][]string{},
LabelsMapper: map[string]string{},
LabelsAsTags: map[string]map[string]string{
"pod": {"my_pod_label": "my_pod_tag"},
},
@@ -1370,7 +1370,7 @@ func TestKSMCheck_processLabelsAsTags(t *testing.T) {
labelsToGet: map[string]string{"standard_pod_label": "standard_pod_tag"},
},
},
LabelsMapper: map[string][]string{},
LabelsMapper: map[string]string{},
LabelsAsTags: map[string]map[string]string{
"pod": {"my_pod_label": "my_pod_tag"},
"node": {"my_node_label": "my_node_tag"},
@@ -1457,32 +1457,32 @@ func TestKSMCheck_mergeLabelsMapper(t *testing.T) {
tests := []struct {
name string
config *KSMConfig
extra map[string][]string
expected map[string][]string
extra map[string]string
expected map[string]string
}{
{
name: "collision",
config: &KSMConfig{LabelsMapper: map[string][]string{"foo": {"bar"}, "baz": {"baf"}}},
extra: map[string][]string{"foo": {"tar"}, "tar": {"foo"}},
expected: map[string][]string{"foo": {"bar"}, "baz": {"baf"}, "tar": {"foo"}},
config: &KSMConfig{LabelsMapper: map[string]string{"foo": "bar", "baz": "baf"}},
extra: map[string]string{"foo": "tar", "tar": "foo"},
expected: map[string]string{"foo": "bar", "baz": "baf", "tar": "foo"},
},
{
name: "no collision",
config: &KSMConfig{LabelsMapper: map[string][]string{"foo": {"bar"}, "baz": {"baf"}}},
extra: map[string][]string{"tar": {"foo"}},
expected: map[string][]string{"foo": {"bar"}, "baz": {"baf"}, "tar": {"foo"}},
config: &KSMConfig{LabelsMapper: map[string]string{"foo": "bar", "baz": "baf"}},
extra: map[string]string{"tar": "foo"},
expected: map[string]string{"foo": "bar", "baz": "baf", "tar": "foo"},
},
{
name: "empty LabelsMapper",
config: &KSMConfig{LabelsMapper: map[string][]string{}},
extra: map[string][]string{"tar": {"foo"}},
expected: map[string][]string{"tar": {"foo"}},
config: &KSMConfig{LabelsMapper: map[string]string{}},
extra: map[string]string{"tar": "foo"},
expected: map[string]string{"tar": "foo"},
},
{
name: "empty extra",
config: &KSMConfig{LabelsMapper: map[string][]string{"tar": {"foo"}}},
extra: map[string][]string{},
expected: map[string][]string{"tar": {"foo"}},
config: &KSMConfig{LabelsMapper: map[string]string{"tar": "foo"}},
extra: map[string]string{},
expected: map[string][]string{"tar": "foo"},
},
}
for _, tt := range tests {
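The test table above pins down the merge semantics after LabelsMapper changed from map[string][]string to map[string]string: keys already present in the config win on collision, and extra keys are appended. A minimal sketch with those semantics follows; the standalone KSMConfig and mergeLabelsMapper here are reduced illustrations for the sketch, not the real definitions in the ksm package.

package ksm

// KSMConfig is trimmed to the one field this sketch needs; the real
// struct carries many more options.
type KSMConfig struct {
	LabelsMapper map[string]string
}

// mergeLabelsMapper copies entries from extra into LabelsMapper, keeping
// the existing value when a key collides — the behaviour the "collision"
// case in the test table expects. The real method may differ in detail.
func (c *KSMConfig) mergeLabelsMapper(extra map[string]string) {
	if c.LabelsMapper == nil {
		c.LabelsMapper = map[string]string{}
	}
	for key, value := range extra {
		if _, exists := c.LabelsMapper[key]; !exists {
			c.LabelsMapper[key] = value
		}
	}
}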
4 changes: 2 additions & 2 deletions pkg/collector/corechecks/cluster/kubeapi/kubernetes_common.go
@@ -22,8 +22,8 @@ type CommonCheck struct {
ac *apiserver.APIClient
}

func (k *CommonCheck) ConfigureKubeAPICheck(config integration.Data, source string) error {
return k.CommonConfigure(config, source)
func (k *CommonCheck) ConfigureKubeAPICheck(senderManager sender.SenderManager, integrationConfigDigest uint64, config, initConfig integration.Data, source string) error {
return k.CommonConfigure(senderManager, integrationConfigDigest, config, initConfig, source)
}

func (k *CommonCheck) InitKubeAPICheck() error {
@@ -15,10 +15,9 @@ import (

"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/urn"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver"

"github.com/DataDog/datadog-agent/pkg/util/log"

"github.com/DataDog/datadog-agent/pkg/metrics"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
v1 "k8s.io/api/core/v1"
)

@@ -162,9 +161,9 @@ func newKubernetesEventMapper(detector apiserver.OpenShiftDetector, clusterName

var _ KubernetesEventMapperFactory = newKubernetesEventMapper // Compile-time check

func (k *kubernetesEventMapper) mapKubernetesEvent(event *v1.Event) (metrics.Event, error) {
func (k *kubernetesEventMapper) mapKubernetesEvent(event *v1.Event) (event.Event, error) {
if err := checkEvent(event); err != nil {
return metrics.Event{}, err
return event.Event{}, err
}

// Map Category to event type
@@ -240,15 +239,15 @@ func (k *kubernetesEventMapper) getCategory(event *v1.Event) EventCategory {
return Others
}

func getAlertType(event *v1.Event) metrics.EventAlertType {
func getAlertType(event *v1.Event) event.EventAlertType {
switch strings.ToLower(event.Type) {
case "normal":
return metrics.EventAlertTypeInfo
return event.EventAlertTypeInfo
case "warning":
return metrics.EventAlertTypeWarning
return event.EventAlertTypeWarning
default:
log.Warnf("Unhandled kubernetes event type '%s', fallback to metrics.EventAlertTypeInfo", event.Type)
return metrics.EventAlertTypeInfo
return event.EventAlertTypeInfo
}
}

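Since the commit is about unit-test fixes, a table-driven test for the refactored getAlertType could look like the sketch below. It relies only on identifiers visible in the hunks above (getAlertType, the event alert-type constants, and v1.Event); the import paths are copied from the diff and otherwise assumed.

package kubeapi

import (
	"testing"

	"github.com/DataDog/datadog-agent/pkg/metrics/event"
	v1 "k8s.io/api/core/v1"
)

// TestGetAlertType_sketch exercises the Kubernetes event type to alert
// type mapping after the metrics -> event package move.
func TestGetAlertType_sketch(t *testing.T) {
	cases := []struct {
		k8sType string
		want    event.EventAlertType
	}{
		{"Normal", event.EventAlertTypeInfo},
		{"Warning", event.EventAlertTypeWarning},
		{"SomethingElse", event.EventAlertTypeInfo}, // unhandled types fall back to info
	}
	for _, c := range cases {
		if got := getAlertType(&v1.Event{Type: c.k8sType}); got != c.want {
			t.Errorf("getAlertType(%q) = %v, want %v", c.k8sType, got, c.want)
		}
	}
}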
10 changes: 5 additions & 5 deletions pkg/collector/corechecks/cluster/kubeapi/kubernetes_events.go
@@ -10,6 +10,7 @@ package kubeapi
import (
"context"
"fmt"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/util"
"strings"
"time"
@@ -18,7 +19,6 @@ import (
"gopkg.in/yaml.v2"
v1 "k8s.io/api/core/v1"

"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/autodiscovery/integration"
"github.com/DataDog/datadog-agent/pkg/collector/check"
core "github.com/DataDog/datadog-agent/pkg/collector/corechecks"
@@ -104,8 +104,8 @@ func KubernetesAPIEventsFactory() check.Check {
}

// Configure parses the check configuration and init the check.
func (k *EventsCheck) Configure(config, initConfig integration.Data, source string) error {
err := k.ConfigureKubeAPICheck(config, source)
func (k *EventsCheck) Configure(senderManager sender.SenderManager, integrationConfigDigest uint64, config, initConfig integration.Data, source string) error {
err := k.ConfigureKubeAPICheck(senderManager, integrationConfigDigest, config, initConfig, source)
if err != nil {
return err
}
@@ -184,7 +184,7 @@ func (k *EventsCheck) Run() error {
return nil
}

sender, err := aggregator.GetSender(k.ID())
sender, err := k.GetSender()
if err != nil {
return err
}
@@ -302,7 +302,7 @@ func (k *EventsCheck) eventCollectionCheck() (newEvents []*v1.Event, err error)
// - iterates over the Kubernetes Events
// - extracts some attributes and builds a structure ready to be submitted as a StackState event
// - converts each K8s event to a metrics event to be processed by the intake
func (k *EventsCheck) processEvents(sender aggregator.Sender, events []*v1.Event) {
func (k *EventsCheck) processEvents(sender sender.Sender, events []*v1.Event) {
mapper := k.mapperFactory(k.ac, k.clusterName, k.instance.EventCategories)
for _, event := range events {
mappedEvent, err := mapper.mapKubernetesEvent(event)
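The doc comment above describes processEvents as: iterate the Kubernetes events, map each one, and hand the result to the sender. A hedged sketch of that loop, decoupled from EventsCheck, is shown here; it assumes sender.Sender exposes an Event method that accepts the new event.Event type (worth verifying against the agent version in use), and the eventMapper interface exists only for the sketch.

package kubeapi

import (
	"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
	"github.com/DataDog/datadog-agent/pkg/metrics/event"
	"github.com/DataDog/datadog-agent/pkg/util/log"
	v1 "k8s.io/api/core/v1"
)

// eventMapper is the narrow view of kubernetesEventMapper this sketch needs.
type eventMapper interface {
	mapKubernetesEvent(*v1.Event) (event.Event, error)
}

// forwardEvents maps each Kubernetes event and submits the result; events
// that fail to map are logged and skipped rather than aborting the batch.
func forwardEvents(s sender.Sender, m eventMapper, events []*v1.Event) {
	for _, ev := range events {
		mapped, err := m.mapKubernetesEvent(ev)
		if err != nil {
			log.Errorf("skipping event %s/%s: %v", ev.Namespace, ev.Name, err)
			continue
		}
		s.Event(mapped)
	}
}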
@@ -9,8 +9,8 @@ package kubeapi

import (
"fmt"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/metrics"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/metrics/event"
"github.com/DataDog/datadog-agent/pkg/util/log"
v1 "k8s.io/api/core/v1"
obj "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -72,7 +72,7 @@ func (k *EventsCheck) podEventsCollectionCheck() (pods []*v1.Pod, err error) {
return pods, nil
}

func (k *EventsCheck) processPods(sender aggregator.Sender, pods []*v1.Pod) {
func (k *EventsCheck) processPods(sender sender.Sender, pods []*v1.Pod) {
mapper := k.mapperFactory(k.ac, k.clusterName, k.instance.EventCategories)
for _, pod := range pods {
events := k.podToEventMapper(pod, mapper, sender)
@@ -84,8 +84,8 @@ func (k *EventsCheck) processPods(sender aggregator.Sender, pods []*v1.Pod) {
}
}

func (k *EventsCheck) podToEventMapper(pod *v1.Pod, mapper *kubernetesEventMapper, sender aggregator.Sender) []metrics.Event {
var events []metrics.Event
func (k *EventsCheck) podToEventMapper(pod *v1.Pod, mapper *kubernetesEventMapper, sender sender.Sender) []event.Event {
var events []event.Event

// Test on active Status. This will be the current state the pod is in, not the previous state
for _, containerStatus := range pod.Status.ContainerStatuses {
@@ -107,7 +107,7 @@ func (k *EventsCheck) podToEventMapper(pod *v1.Pod, mapper *kubernetesEventMappe
}

// mapPodToMetricEventForOutOfMemory Attempt to map a pod to a metric event which can be forwarded to the aggregator
func (k *EventsCheck) mapPodToMetricEventForOutOfMemory(pod *v1.Pod, containerName string, terminatedState *v1.ContainerStateTerminated, mapper *kubernetesEventMapper) (metrics.Event, error) {
func (k *EventsCheck) mapPodToMetricEventForOutOfMemory(pod *v1.Pod, containerName string, terminatedState *v1.ContainerStateTerminated, mapper *kubernetesEventMapper) (event.Event, error) {
event := &v1.Event{
InvolvedObject: v1.ObjectReference{
Name: pod.Name,
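podToEventMapper above walks the pod's current container statuses looking for out-of-memory terminations before synthesizing an event. A small sketch of that status walk follows; the "OOMKilled" reason string is the standard kubelet value, but the real check may also consult LastTerminationState or the exit code, so treat this as an illustration rather than the exact logic.

package kubeapi

import (
	v1 "k8s.io/api/core/v1"
)

// oomKilledContainers returns the names of containers whose current state
// is a termination with reason "OOMKilled".
func oomKilledContainers(pod *v1.Pod) []string {
	var names []string
	for _, cs := range pod.Status.ContainerStatuses {
		if term := cs.State.Terminated; term != nil && term.Reason == "OOMKilled" {
			names = append(names, cs.Name)
		}
	}
	return names
}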
@@ -11,6 +11,7 @@ import (
"context"
"errors"
"fmt"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/util"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername"
@@ -19,7 +20,6 @@ import (
"gopkg.in/yaml.v2"
"k8s.io/api/core/v1"

"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/autodiscovery/integration"
"github.com/DataDog/datadog-agent/pkg/collector/check"
core "github.com/DataDog/datadog-agent/pkg/collector/corechecks"
@@ -129,7 +129,7 @@ func (k *MetricsCheck) Run() error {
return err
}

sender, err := aggregator.GetSender(k.ID())
sender, err := k.GetSender()
if err != nil {
return err
}
@@ -167,7 +167,7 @@ func (k *MetricsCheck) Run() error {
return nil
}

func (k *MetricsCheck) parseComponentStatus(sender aggregator.Sender, componentsStatus *v1.ComponentStatusList) error {
func (k *MetricsCheck) parseComponentStatus(sender sender.Sender, componentsStatus *v1.ComponentStatusList) error {
for _, component := range componentsStatus.Items {

if component.ObjectMeta.Name == "" {
@@ -10,6 +10,7 @@ package kubeapi
import (
"fmt"
"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"
"github.com/DataDog/datadog-agent/pkg/util/log"
v1 "k8s.io/api/core/v1"
"time"
@@ -60,7 +61,7 @@ func (k *MetricsCheck) processPods(sender aggregator.Sender, pods []*v1.Pod) {
}
}

func (k *MetricsCheck) podToMetricMappingForOutOfMemory(pod *v1.Pod, sender aggregator.Sender) {
func (k *MetricsCheck) podToMetricMappingForOutOfMemory(pod *v1.Pod, sender sender.Sender) {
// Go through the pod statuses and attempt to find an OOM state
for _, containerStatus := range pod.Status.ContainerStatuses {
value := float64(0)
@@ -10,12 +10,12 @@ package kubeapi
import (
"errors"
"fmt"
"github.com/DataDog/datadog-agent/pkg/aggregator/sender"

osq "github.com/openshift/api/quota/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"

"github.com/DataDog/datadog-agent/pkg/aggregator"
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver"
"github.com/DataDog/datadog-agent/pkg/util/log"
)
@@ -49,7 +49,7 @@ func (k *MetricsCheck) retrieveOShiftClusterQuotas() ([]osq.ClusterResourceQuota
}

// reportClusterQuotas reports metrics on OpenShift ClusterResourceQuota objects
func (k *MetricsCheck) reportClusterQuotas(quotas []osq.ClusterResourceQuota, sender aggregator.Sender) {
func (k *MetricsCheck) reportClusterQuotas(quotas []osq.ClusterResourceQuota, sender sender.Sender) {
for _, quota := range quotas {
quotaTags := []string{fmt.Sprintf("clusterquota:%s", quota.Name)}
remaining := computeQuotaRemaining(quota.Status.Total.Used, quota.Status.Total.Hard)
@@ -67,7 +67,7 @@ func (k *MetricsCheck) reportClusterQuotas(quotas []osq.ClusterResourceQuota, se
}
}

func (k *MetricsCheck) reportQuota(quotas v1.ResourceList, metricPrefix, metricSuffix string, tags []string, sender aggregator.Sender) {
func (k *MetricsCheck) reportQuota(quotas v1.ResourceList, metricPrefix, metricSuffix string, tags []string, sender sender.Sender) {
for res, qty := range quotas {
metricName := fmt.Sprintf("%s.%s.%s", metricPrefix, res, metricSuffix)
sender.Gauge(metricName, quantityToFloat64(qty), "", tags)
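reportClusterQuotas above derives a "remaining" resource list from the quota's used and hard totals before handing each entry to reportQuota. A sketch of that arithmetic follows; computeQuotaRemaining in the real file may clamp negative values or filter resources, so this only illustrates the Quantity subtraction.

package kubeapi

import (
	v1 "k8s.io/api/core/v1"
)

// remainingQuota subtracts the used quantity from the hard limit for every
// resource present in hard, treating a missing used entry as zero.
func remainingQuota(used, hard v1.ResourceList) v1.ResourceList {
	remaining := v1.ResourceList{}
	for res, hardQty := range hard {
		rem := hardQty.DeepCopy()
		if usedQty, ok := used[res]; ok {
			rem.Sub(usedQty)
		}
		remaining[res] = rem
	}
	return remaining
}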
@@ -45,8 +45,8 @@ func warnDisabledResource(name string, additionalWarning string, isEnabled bool)
}

// Configure parses the check configuration and init the check.
func (t *TopologyCheck) Configure(config, initConfig integration.Data, source string) error {
err := t.ConfigureKubeAPICheck(config, source)
func (t *TopologyCheck) Configure(senderManager sender.SenderManager, integrationConfigDigest uint64, config, initConfig integration.Data, source string) error {
err := t.ConfigureKubeAPICheck(senderManager, integrationConfigDigest, config, initConfig, source)
if err != nil {
return err
}
@@ -4,7 +4,7 @@ package kubeapi

import (
"github.com/DataDog/datadog-agent/pkg/batcher"
"github.com/DataDog/datadog-agent/pkg/collector/check"
checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id"
"github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/topology"
"github.com/DataDog/datadog-agent/pkg/util/log"
@@ -24,7 +24,7 @@ type TopologyConfig struct {
ConfigMapMaxDataSize int `yaml:"configmap_max_datasize"`
CSIPVMapperEnabled bool `yaml:"csi_pv_mapper_enabled"`
Resources ResourcesConfig `yaml:"resources"`
CheckID check.ID
CheckID checkid.ID
Instance topology.Instance
}

@@ -87,7 +87,7 @@ type TopologySubmitter interface {
}

// NewBatchTopologySubmitter creates a new instance of BatchTopologySubmitter
func NewBatchTopologySubmitter(checkID check.ID, instance topology.Instance) TopologySubmitter {
func NewBatchTopologySubmitter(checkID checkid.ID, instance topology.Instance) TopologySubmitter {
return &BatchTopologySubmitter{
CheckID: checkID,
Instance: instance,
@@ -96,7 +96,7 @@ func NewBatchTopologySubmitter(checkID check.ID, instance topology.Instance) Top

// BatchTopologySubmitter provides functionality to submit topology data with the Batcher.
type BatchTopologySubmitter struct {
CheckID check.ID
CheckID checkid.ID
Instance topology.Instance
}

@@ -13,7 +13,7 @@ import (
"testing"

"github.com/DataDog/datadog-agent/pkg/batcher"
"github.com/DataDog/datadog-agent/pkg/collector/check"
checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id"
collectors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/topologycollectors"
agentConfig "github.com/DataDog/datadog-agent/pkg/config"
"github.com/DataDog/datadog-agent/pkg/topology"
@@ -212,7 +212,7 @@ func testRunClusterCollectors(t *testing.T, sourceProperties bool, exposeKuberne
}

// NewTestTopologySubmitter creates a new instance of TestTopologySubmitter
func NewTestTopologySubmitter(t *testing.T, checkID check.ID, instance topology.Instance) TopologySubmitter {
func NewTestTopologySubmitter(t *testing.T, checkID checkid.ID, instance topology.Instance) TopologySubmitter {
return &TestTopologySubmitter{
t: t,
CheckID: checkID,
@@ -223,7 +223,7 @@ func NewTestTopologySubmitter(t *testing.T, checkID check.ID, instance topology.
// TestTopologySubmitter provides functionality to submit topology data with the Batcher.
type TestTopologySubmitter struct {
t *testing.T
CheckID check.ID
CheckID checkid.ID
Instance topology.Instance
}

@@ -1402,8 +1402,8 @@ func (m MockIngressAPICollectorClient) GetIngressesExtV1B1() ([]v1beta1.Ingress,
},
},
Status: v1beta1.IngressStatus{
LoadBalancer: netV1.IngressLoadBalancerStatus{
Ingress: []netV1.IngressLoadBalancerIngress{
LoadBalancer: v1beta1.IngressLoadBalancerStatus{
Ingress: []v1beta1.IngressLoadBalancerIngress{
{IP: "34.100.200.15"},
{Hostname: "64047e8f24bb48e9a406ac8286ee8b7d.eu-west-1.elb.amazonaws.com"},
},
Expand Down Expand Up @@ -1488,8 +1488,8 @@ func (m MockIngressAPICollectorClientNoHTTPRule) GetIngressesExtV1B1() ([]v1beta
},
},
Status: v1beta1.IngressStatus{
LoadBalancer: netV1.IngressLoadBalancerStatus{
Ingress: []netV1.IngressLoadBalancerIngress{
LoadBalancer: v1beta1.IngressLoadBalancerStatus{
Ingress: []v1beta1.IngressLoadBalancerIngress{
{IP: "34.100.200.15"},
{Hostname: "64047e8f24bb48e9a406ac8286ee8b7d.eu-west-1.elb.amazonaws.com"},
},
