Skip to content

Commit

Permalink
Merge pull request #2184 from jcantrill/log4532
Browse files Browse the repository at this point in the history
LOG-4568: change limitspec field name to PodLimit
  • Loading branch information
openshift-ci[bot] committed Oct 3, 2023
2 parents 09e097d + 7d9afc2 commit 5a6af17
Show file tree
Hide file tree
Showing 14 changed files with 59 additions and 50 deletions.
6 changes: 3 additions & 3 deletions apis/logging/v1/cluster_log_forwarder.go
Expand Up @@ -171,16 +171,16 @@ func (input *InputSpec) Types() sets.String {
// HasPolicy returns whether the input spec has flow control policies defined in it.
func (input *InputSpec) HasPolicy() bool {
if input.Application != nil &&
(input.Application.ContainerLimit != nil ||
(input.Application.PodLimit != nil ||
input.Application.GroupLimit != nil) {
return true
}
return false
}

func (input *InputSpec) GetMaxRecordsPerSecond() int64 {
if input.Application.ContainerLimit != nil {
return input.Application.ContainerLimit.MaxRecordsPerSecond
if input.Application.PodLimit != nil {
return input.Application.PodLimit.MaxRecordsPerSecond
} else {
return input.Application.GroupLimit.MaxRecordsPerSecond
}
Expand Down
6 changes: 3 additions & 3 deletions apis/logging/v1/cluster_log_forwarder_types.go
Expand Up @@ -171,12 +171,12 @@ type Application struct {
//+operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:hidden"}
GroupLimit *LimitSpec `json:"-"` //`json:"groupLimit,omitempty"`

// Container limit applied to each container selected
// by this input. No container selected by this input can
// PodLimit applied to each pod selected
// by this input. No pod selected by this input can
// exceed this limit.
//
// +optional
ContainerLimit *LimitSpec `json:"containerLimit,omitempty"`
PodLimit *LimitSpec `json:"podLimit,omitempty"`
}

// Infrastructure enables infrastructure logs. Filtering may be added in future.
Expand Down
4 changes: 2 additions & 2 deletions apis/logging/v1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion bundle/manifests/clusterlogging.clusterserviceversion.yaml
Expand Up @@ -118,7 +118,7 @@ metadata:
certified: "false"
console.openshift.io/plugins: '["logging-view-plugin"]'
containerImage: quay.io/openshift-logging/cluster-logging-operator:latest
createdAt: "2023-09-26T17:11:39Z"
createdAt: "2023-10-02T18:19:30Z"
description: The Red Hat OpenShift Logging Operator for OCP provides a means for
configuring and managing your aggregated logging stack.
olm.skipRange: '>=5.6.0-0 <5.8.0'
Expand Down
21 changes: 10 additions & 11 deletions bundle/manifests/logging.openshift.io_clusterlogforwarders.yaml
Expand Up @@ -258,24 +258,23 @@ spec:
description: Application, if present, enables named set of `application`
logs that can specify a set of match criteria
properties:
containerLimit:
description: Container limit applied to each container selected
by this input. No container selected by this input can
exceed this limit.
properties:
maxRecordsPerSecond:
description: MaxRecordsPerSecond is the maximum number
of log records allowed per input/output in a pipeline
format: int64
type: integer
type: object
namespaces:
description: Namespaces from which to collect application
logs. Only messages from these namespaces are collected.
If absent or empty, logs are collected from all namespaces.
items:
type: string
type: array
podLimit:
description: PodLimit applied to each pod selected by this
input. No pod selected by this input can exceed this limit.
properties:
maxRecordsPerSecond:
description: MaxRecordsPerSecond is the maximum number
of log records allowed per input/output in a pipeline
format: int64
type: integer
type: object
selector:
description: Selector for logs from pods with matching labels.
Only messages from pods with these labels are collected.
Expand Down
21 changes: 10 additions & 11 deletions config/crd/bases/logging.openshift.io_clusterlogforwarders.yaml
Expand Up @@ -259,24 +259,23 @@ spec:
description: Application, if present, enables named set of `application`
logs that can specify a set of match criteria
properties:
containerLimit:
description: Container limit applied to each container selected
by this input. No container selected by this input can
exceed this limit.
properties:
maxRecordsPerSecond:
description: MaxRecordsPerSecond is the maximum number
of log records allowed per input/output in a pipeline
format: int64
type: integer
type: object
namespaces:
description: Namespaces from which to collect application
logs. Only messages from these namespaces are collected.
If absent or empty, logs are collected from all namespaces.
items:
type: string
type: array
podLimit:
description: PodLimit applied to each pod selected by this
input. No pod selected by this input can exceed this limit.
properties:
maxRecordsPerSecond:
description: MaxRecordsPerSecond is the maximum number
of log records allowed per input/output in a pipeline
format: int64
type: integer
type: object
selector:
description: Selector for logs from pods with matching labels.
Only messages from pods with these labels are collected.
Expand Down
11 changes: 11 additions & 0 deletions docs/features/collection.adoc
Expand Up @@ -111,6 +111,17 @@ logstash 7.10.1|✓|
- retrymaxinterval
- retrytimeout

|======
.Vector Tuning
[options="header"]
|======
|Feature|Description
|Application Input Flow Control
| Specify the maximum rate of incoming logs for a defined application input.
|Output Flow Control
| Specify the maximum rate of logs sent to a given output. Excess logs are dropped.


|======

=== Metrics and Alerting
Expand Down
16 changes: 8 additions & 8 deletions docs/reference/operator/api.adoc
Expand Up @@ -94,12 +94,18 @@ All conditions in the selector must be satisfied (logical AND) to select logs.
|======================
|Property|Type|Description

|containerLimit|object| *(optional)* Container limit applied to each container selected
|namespaces|array| *(optional)* Namespaces from which to collect application logs.
|podLimit|object| *(optional)* PodLimit applied to each pod selected
|selector|object| *(optional)* Selector for logs from pods with matching labels.
|======================

=== .spec.inputs[].application.containerLimit
=== .spec.inputs[].application.namespaces[]
===== Description

===== Type
* array

=== .spec.inputs[].application.podLimit
===== Description

===== Type
Expand All @@ -112,12 +118,6 @@ All conditions in the selector must be satisfied (logical AND) to select logs.
|maxRecordsPerSecond|int| MaxRecordsPerSecond is the maximum number of log records
|======================

=== .spec.inputs[].application.namespaces[]
===== Description

===== Type
* array

=== .spec.inputs[].application.selector
===== Description

Expand Down
4 changes: 2 additions & 2 deletions internal/generator/vector/inputs.go
Expand Up @@ -68,8 +68,8 @@ func AddThrottle(spec *logging.InputSpec) []generator.Element {
el := []generator.Element{}
input := fmt.Sprintf(UserDefinedInput, spec.Name)

if spec.Application.ContainerLimit != nil {
threshold = spec.Application.ContainerLimit.MaxRecordsPerSecond
if spec.Application.PodLimit != nil {
threshold = spec.Application.PodLimit.MaxRecordsPerSecond
throttle_key = perContainerLimitKeyField
} else {
threshold = spec.Application.GroupLimit.MaxRecordsPerSecond
Expand Down
2 changes: 1 addition & 1 deletion internal/generator/vector/sources_to_pipelines_test.go
Expand Up @@ -447,7 +447,7 @@ source = '''
"podname": "very-important",
},
},
ContainerLimit: &logging.LimitSpec{
PodLimit: &logging.LimitSpec{
MaxRecordsPerSecond: 100,
},
},
Expand Down
Expand Up @@ -191,7 +191,7 @@ func verifyInputs(spec *loggingv1.ClusterLogForwarderSpec, status *loggingv1.Clu
// Check if inputspec has application, infrastructure, audit or receiver specs
case input.Application == nil && input.Infrastructure == nil && input.Audit == nil && input.Receiver == nil:
badInput("inputspec must define one or more of application, infrastructure, audit or receiver")
case input.HasPolicy() && input.Application.ContainerLimit != nil && input.Application.GroupLimit != nil:
case input.HasPolicy() && input.Application.PodLimit != nil && input.Application.GroupLimit != nil:
badInput("inputspec must define only one of container or group limit")
case input.HasPolicy() && input.GetMaxRecordsPerSecond() < 0:
badInput("inputspec cannot have a negative limit threshold")
Expand Down
Expand Up @@ -204,7 +204,7 @@ var _ = Describe("Validate clusterlogforwarderspec", func() {
{
Name: "custom-app",
Application: &loggingv1.Application{
ContainerLimit: &loggingv1.LimitSpec{
PodLimit: &loggingv1.LimitSpec{
MaxRecordsPerSecond: 100,
},
GroupLimit: &loggingv1.LimitSpec{
Expand All @@ -223,7 +223,7 @@ var _ = Describe("Validate clusterlogforwarderspec", func() {
{
Name: "custom-app-container-limit",
Application: &loggingv1.Application{
ContainerLimit: &loggingv1.LimitSpec{
PodLimit: &loggingv1.LimitSpec{
MaxRecordsPerSecond: 100,
},
},
Expand All @@ -248,7 +248,7 @@ var _ = Describe("Validate clusterlogforwarderspec", func() {
{
Name: "custom-app-container-limit",
Application: &loggingv1.Application{
ContainerLimit: &loggingv1.LimitSpec{
PodLimit: &loggingv1.LimitSpec{
MaxRecordsPerSecond: -100,
},
},
Expand Down
6 changes: 3 additions & 3 deletions test/e2e/flowcontrol/flowcontrol_test.go
Expand Up @@ -93,7 +93,7 @@ var _ = Describe("[E2E] FlowControl", func() {
Name: "custom-app-0",
Application: &loggingv1.Application{
Namespaces: []string{stressorNS},
ContainerLimit: &loggingv1.LimitSpec{
PodLimit: &loggingv1.LimitSpec{
MaxRecordsPerSecond: 50,
}, // 10 files and 100 group limit, so 10 lines per file,
},
Expand All @@ -102,7 +102,7 @@ var _ = Describe("[E2E] FlowControl", func() {
Name: "custom-app-1",
Application: &loggingv1.Application{
Namespaces: []string{stressorNS},
ContainerLimit: &loggingv1.LimitSpec{
PodLimit: &loggingv1.LimitSpec{
MaxRecordsPerSecond: 50,
}, // 10 files and 100 group limit, so 10 lines per file,
},
Expand Down Expand Up @@ -175,7 +175,7 @@ var _ = Describe("[E2E] FlowControl", func() {
//GroupLimit: &loggingv1.LimitSpec{
// MaxRecordsPerSecond: 200,
//},
ContainerLimit: &loggingv1.LimitSpec{
PodLimit: &loggingv1.LimitSpec{
MaxRecordsPerSecond: 200,
},
},
Expand Down
2 changes: 1 addition & 1 deletion test/functional/flowcontrol/application_test.go
Expand Up @@ -69,7 +69,7 @@ var _ = Describe("[Functional][FlowControl] Policies at Input", func() {
Skip("Skipping test since flow-control is not supported with fluentd")
}

f.Forwarder.Spec.Inputs[0].Application.ContainerLimit = &logging.LimitSpec{
f.Forwarder.Spec.Inputs[0].Application.PodLimit = &logging.LimitSpec{
MaxRecordsPerSecond: 10,
}

Expand Down

0 comments on commit 5a6af17

Please sign in to comment.