[target-allocator] Introduce "per node" allocation strategy to target allocator (#2430)
matej-g committed Feb 9, 2024
1 parent 94c8420 commit 02e44fb
Showing 34 changed files with 801 additions and 51 deletions.
16 changes: 16 additions & 0 deletions .chloggen/per-node-allocation-strategy.yaml
@@ -0,0 +1,16 @@
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. operator, target allocator, github action)
+component: target allocator
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add new "per node" allocation strategy to target allocator. This strategy will allocate targets to the nodes on which they reside. It should only be used in conjunction with the daemonset mode.
+
+# One or more tracking issues related to the change
+issues: [1828]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
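
To make the constraint in that note concrete, here is a minimal sketch of a collector spec that the new validation accepts: daemonset mode paired with the per-node strategy. It is written in the style of the webhook tests later in this diff; the types come from this repository's `apis/v1alpha1` package, and the wrapping `main` is illustrative only.

```go
package main

import (
	"fmt"

	"github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
)

func main() {
	// A daemonset-mode collector using the new per-node strategy — the only
	// strategy the webhook accepts for daemonset deployments after this change.
	otelcol := v1alpha1.OpenTelemetryCollector{
		Spec: v1alpha1.OpenTelemetryCollectorSpec{
			Mode: v1alpha1.ModeDaemonSet,
			TargetAllocator: v1alpha1.OpenTelemetryTargetAllocator{
				Enabled:            true,
				AllocationStrategy: v1alpha1.OpenTelemetryTargetAllocatorAllocationStrategyPerNode,
			},
		},
	}
	fmt.Println(otelcol.Spec.TargetAllocator.AllocationStrategy) // per-node
}
```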
1 change: 1 addition & 0 deletions .github/workflows/e2e.yaml
@@ -30,6 +30,7 @@ jobs:
         - e2e-autoscale
         - e2e-pdb
         - e2e-opampbridge
+        - e2e-targetallocator
         - e2e-prometheuscr
         - e2e-multi-instrumentation
       include:
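This wires the new suite into the CI matrix; each group entry presumably maps onto the make target of the same name, so `e2e-targetallocator` picks up the Makefile addition below.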
5 changes: 5 additions & 0 deletions Makefile
@@ -246,6 +246,11 @@ e2e-multi-instrumentation:
 e2e-opampbridge:
 	$(KUTTL) test --config kuttl-test-opampbridge.yaml
+
+# Target allocator end-to-end tests
+.PHONY: e2e-targetallocator
+e2e-targetallocator:
+	$(KUTTL) test --config kuttl-test-targetallocator.yaml
 
 .PHONY: prepare-e2e
 prepare-e2e: kuttl set-image-controller add-image-targetallocator add-image-opampbridge container container-target-allocator container-operator-opamp-bridge start-kind cert-manager install-metrics-server install-targetallocator-prometheus-crds load-image-all deploy
 
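Locally, the same suite runs on its own with `make e2e-targetallocator`, after `make prepare-e2e` has built the images and started the kind cluster.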
5 changes: 4 additions & 1 deletion apis/v1alpha1/allocation_strategy.go
@@ -16,7 +16,7 @@ package v1alpha1
 
 type (
 	// OpenTelemetryTargetAllocatorAllocationStrategy represent which strategy to distribute target to each collector
-	// +kubebuilder:validation:Enum=least-weighted;consistent-hashing
+	// +kubebuilder:validation:Enum=least-weighted;consistent-hashing;per-node
 	OpenTelemetryTargetAllocatorAllocationStrategy string
 )
 
@@ -26,4 +26,7 @@ const (
 
 	// OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing targets will be consistently added to collectors, which allows a high-availability setup.
 	OpenTelemetryTargetAllocatorAllocationStrategyConsistentHashing OpenTelemetryTargetAllocatorAllocationStrategy = "consistent-hashing"
+
+	// OpenTelemetryTargetAllocatorAllocationStrategyPerNode targets will be assigned to the collector on the node they reside on (use only with daemon set).
+	OpenTelemetryTargetAllocatorAllocationStrategyPerNode OpenTelemetryTargetAllocatorAllocationStrategy = "per-node"
 )
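
The pairing between strategy and workload type that the webhook change below enforces can be summarized in a few lines of Go. This is a hypothetical helper for illustration only, not part of the commit; `Mode` and the constants are the v1alpha1 types used throughout this diff.

```go
// expectedMode maps an allocation strategy to the collector deployment mode
// it is designed for, mirroring the validation rules added in this commit.
// Hypothetical helper for illustration only.
func expectedMode(s OpenTelemetryTargetAllocatorAllocationStrategy) Mode {
	if s == OpenTelemetryTargetAllocatorAllocationStrategyPerNode {
		// per-node assumes exactly one collector per node, i.e. a daemonset.
		return ModeDaemonSet
	}
	// least-weighted and consistent-hashing balance across a statefulset.
	return ModeStatefulSet
}
```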
66 changes: 43 additions & 23 deletions apis/v1alpha1/collector_webhook.go
@@ -228,32 +228,14 @@ func (c CollectorWebhook) validate(ctx context.Context, r *OpenTelemetryCollecto
return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the attribute 'AdditionalContainers'", r.Spec.Mode)
}

// validate target allocation
if r.Spec.TargetAllocator.Enabled && r.Spec.Mode != ModeStatefulSet {
return warnings, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode)
}

// validate Prometheus config for target allocation
// validate target allocator configs
if r.Spec.TargetAllocator.Enabled {
promCfg, err := ta.ConfigToPromConfig(r.Spec.Config)
if err != nil {
return warnings, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
}
err = ta.ValidatePromConfig(promCfg, r.Spec.TargetAllocator.Enabled, featuregate.EnableTargetAllocatorRewrite.IsEnabled())
if err != nil {
return warnings, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
taWarnings, err := c.validateTargetAllocatorConfig(ctx, r)
if taWarnings != nil {
warnings = append(warnings, taWarnings...)
}
err = ta.ValidateTargetAllocatorConfig(r.Spec.TargetAllocator.PrometheusCR.Enabled, promCfg)
if err != nil {
return warnings, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
}
// if the prometheusCR is enabled, it needs a suite of permissions to function
if r.Spec.TargetAllocator.PrometheusCR.Enabled {
if subjectAccessReviews, err := c.reviewer.CheckPolicyRules(ctx, r.GetNamespace(), r.Spec.TargetAllocator.ServiceAccount, targetAllocatorCRPolicyRules...); err != nil {
return warnings, fmt.Errorf("unable to check rbac rules %w", err)
} else if allowed, deniedReviews := rbac.AllSubjectAccessReviewsAllowed(subjectAccessReviews); !allowed {
warnings = append(warnings, warningsGroupedByResource(deniedReviews)...)
}
return warnings, err
}
}

@@ -365,6 +347,44 @@ func (c CollectorWebhook) validate(ctx context.Context, r *OpenTelemetryCollecto
 	return warnings, nil
 }
 
+func (c CollectorWebhook) validateTargetAllocatorConfig(ctx context.Context, r *OpenTelemetryCollector) (admission.Warnings, error) {
+	if r.Spec.Mode != ModeStatefulSet && r.Spec.Mode != ModeDaemonSet {
+		return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which does not support the target allocation deployment", r.Spec.Mode)
+	}
+
+	if r.Spec.Mode == ModeDaemonSet && r.Spec.TargetAllocator.AllocationStrategy != OpenTelemetryTargetAllocatorAllocationStrategyPerNode {
+		return nil, fmt.Errorf("the OpenTelemetry Collector mode is set to %s, which must be used with target allocation strategy %s ", r.Spec.Mode, OpenTelemetryTargetAllocatorAllocationStrategyPerNode)
+	}
+
+	if r.Spec.TargetAllocator.AllocationStrategy == OpenTelemetryTargetAllocatorAllocationStrategyPerNode && r.Spec.Mode != ModeDaemonSet {
+		return nil, fmt.Errorf("target allocation strategy %s is only supported in OpenTelemetry Collector mode %s", OpenTelemetryTargetAllocatorAllocationStrategyPerNode, ModeDaemonSet)
+	}
+
+	// validate Prometheus config for target allocation
+	promCfg, err := ta.ConfigToPromConfig(r.Spec.Config)
+	if err != nil {
+		return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
+	}
+	err = ta.ValidatePromConfig(promCfg, r.Spec.TargetAllocator.Enabled, featuregate.EnableTargetAllocatorRewrite.IsEnabled())
+	if err != nil {
+		return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
+	}
+	err = ta.ValidateTargetAllocatorConfig(r.Spec.TargetAllocator.PrometheusCR.Enabled, promCfg)
+	if err != nil {
+		return nil, fmt.Errorf("the OpenTelemetry Spec Prometheus configuration is incorrect, %w", err)
+	}
+	// if the prometheusCR is enabled, it needs a suite of permissions to function
+	if r.Spec.TargetAllocator.PrometheusCR.Enabled {
+		if subjectAccessReviews, err := c.reviewer.CheckPolicyRules(ctx, r.GetNamespace(), r.Spec.TargetAllocator.ServiceAccount, targetAllocatorCRPolicyRules...); err != nil {
+			return nil, fmt.Errorf("unable to check rbac rules %w", err)
+		} else if allowed, deniedReviews := rbac.AllSubjectAccessReviewsAllowed(subjectAccessReviews); !allowed {
+			return warningsGroupedByResource(deniedReviews), nil
+		}
+	}
+
+	return nil, nil
+}
+
 func checkAutoscalerSpec(autoscaler *AutoscalerSpec) error {
 	if autoscaler.Behavior != nil {
 		if autoscaler.Behavior.ScaleDown != nil && autoscaler.Behavior.ScaleDown.StabilizationWindowSeconds != nil &&
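Note that the second and third checks above make daemonset mode and the per-node strategy mutually required: a daemonset collector is rejected unless it uses per-node, and per-node is rejected under statefulset, the only other mode that supports target allocation.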
13 changes: 13 additions & 0 deletions apis/v1alpha1/collector_webhook_test.go
@@ -670,6 +670,19 @@ func TestOTELColValidatingWebhook(t *testing.T) {
 			},
 			expectedErr: "the OpenTelemetry Spec Prometheus configuration is incorrect",
 		},
+		{
+			name: "invalid target allocation strategy",
+			otelcol: OpenTelemetryCollector{
+				Spec: OpenTelemetryCollectorSpec{
+					Mode: ModeDaemonSet,
+					TargetAllocator: OpenTelemetryTargetAllocator{
+						Enabled:            true,
+						AllocationStrategy: OpenTelemetryTargetAllocatorAllocationStrategyLeastWeighted,
+					},
+				},
+			},
+			expectedErr: "mode is set to daemonset, which must be used with target allocation strategy per-node",
+		},
 		{
 			name: "invalid port name",
 			otelcol: OpenTelemetryCollector{
3 changes: 2 additions & 1 deletion apis/v1alpha1/opentelemetrycollector_types.go
@@ -305,7 +305,8 @@ type OpenTelemetryTargetAllocator struct {
 	// +optional
 	Resources v1.ResourceRequirements `json:"resources,omitempty"`
 	// AllocationStrategy determines which strategy the target allocator should use for allocation.
-	// The current options are least-weighted and consistent-hashing. The default option is consistent-hashing
+	// The current options are least-weighted, consistent-hashing and per-node. The default is
+	// consistent-hashing.
 	// +optional
 	// +kubebuilder:default:=consistent-hashing
 	AllocationStrategy OpenTelemetryTargetAllocatorAllocationStrategy `json:"allocationStrategy,omitempty"`
@@ -5067,11 +5067,12 @@ spec:
                 default: consistent-hashing
                 description: AllocationStrategy determines which strategy the
                   target allocator should use for allocation. The current options
-                  are least-weighted and consistent-hashing. The default option
-                  is consistent-hashing
+                  are least-weighted, consistent-hashing and per-node. The default
+                  is consistent-hashing.
                 enum:
                 - least-weighted
                 - consistent-hashing
+                - per-node
                 type: string
               enabled:
                 description: Enabled indicates whether to use a target allocation
6 changes: 4 additions & 2 deletions cmd/otel-allocator/allocation/allocatortest.go
@@ -51,6 +51,7 @@ func MakeNCollectors(n int, startingIndex int) map[string]*Collector {
 		toReturn[collector] = &Collector{
 			Name:       collector,
 			NumTargets: 0,
+			NodeName:   fmt.Sprintf("node-%d", i),
 		}
 	}
 	return toReturn
@@ -60,8 +61,9 @@ func MakeNNewTargetsWithEmptyCollectors(n int, startingIndex int) map[string]*ta
 	toReturn := map[string]*target.Item{}
 	for i := startingIndex; i < n+startingIndex; i++ {
 		label := model.LabelSet{
-			"i":     model.LabelValue(strconv.Itoa(i)),
-			"total": model.LabelValue(strconv.Itoa(n + startingIndex)),
+			"i":                               model.LabelValue(strconv.Itoa(i)),
+			"total":                           model.LabelValue(strconv.Itoa(n + startingIndex)),
+			"__meta_kubernetes_pod_node_name": model.LabelValue("node-0"),
 		}
 		newTarget := target.NewItem(fmt.Sprintf("test-job-%d", i), fmt.Sprintf("test-url-%d", i), label, "")
 		toReturn[newTarget.Hash()] = newTarget
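The `__meta_kubernetes_pod_node_name` label added to these fixtures is the label Prometheus' Kubernetes service discovery attaches to pod targets. The per-node allocator added by this commit (its source is not rendered in this excerpt) presumably keys on it; under that assumption, extracting a target's node looks roughly like this:

```go
import "github.com/prometheus/common/model"

// nodeNameOf returns the node a discovered target runs on, or "" when the
// discovery labels carry no node (e.g. a non-pod service-discovery role).
func nodeNameOf(labels model.LabelSet) string {
	return string(labels["__meta_kubernetes_pod_node_name"])
}
```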
2 changes: 1 addition & 1 deletion cmd/otel-allocator/allocation/consistent_hashing.go
@@ -161,7 +161,7 @@ func (c *consistentHashingAllocator) handleCollectors(diff diff.Changes[*Collect
 	}
 	// Insert the new collectors
 	for _, i := range diff.Additions() {
-		c.collectors[i.Name] = NewCollector(i.Name)
+		c.collectors[i.Name] = NewCollector(i.Name, i.NodeName)
 		c.consistentHasher.Add(c.collectors[i.Name])
 	}
 
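Both strategies now pass the node name through `NewCollector`. The collector definition itself is not shown in this excerpt, but from these call sites and the test fixture above, its updated shape is presumably along these lines:

```go
// Assumed shape of Collector after this change (sketch, not the committed
// source): a NodeName field recording where the collector pod runs,
// populated from the constructor.
type Collector struct {
	Name       string
	NodeName   string
	NumTargets int
}

func NewCollector(name, nodeName string) *Collector {
	return &Collector{Name: name, NodeName: nodeName}
}
```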
2 changes: 1 addition & 1 deletion cmd/otel-allocator/allocation/least_weighted.go
@@ -191,7 +191,7 @@ func (allocator *leastWeightedAllocator) handleCollectors(diff diff.Changes[*Col
 	}
 	// Insert the new collectors
 	for _, i := range diff.Additions() {
-		allocator.collectors[i.Name] = NewCollector(i.Name)
+		allocator.collectors[i.Name] = NewCollector(i.Name, i.NodeName)
 	}
 	if allocateTargets {
 		for _, item := range allocator.targetItems {
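The per-node strategy implementation itself (per_node.go) is among the changed files not shown in this excerpt. A minimal sketch of its core idea, assuming the `Collector` shape above and that `target.Item` exposes its discovery labels as `Labels` — not the actual implementation:

```go
// assignPerNode hands each target to the collector running on the target's
// own node. Sketch only — the real per_node.go may differ, e.g. in how it
// records assignments and handles targets without a node.
func assignPerNode(collectors map[string]*Collector, targets map[string]*target.Item) map[string]*Collector {
	// Index collectors by the node they run on; with a daemonset there is
	// exactly one collector per node.
	byNode := make(map[string]*Collector, len(collectors))
	for _, col := range collectors {
		byNode[col.NodeName] = col
	}
	assignments := make(map[string]*Collector, len(targets))
	for hash, item := range targets {
		node := string(item.Labels["__meta_kubernetes_pod_node_name"])
		if col, ok := byNode[node]; ok {
			assignments[hash] = col
			col.NumTargets++
		}
		// Targets with no matching node remain unassigned in this sketch.
	}
	return assignments
}
```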
