From fc14f5415272190be8060159831fe90aa33f76db Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Mon, 3 Mar 2025 17:33:51 +0000
Subject: [PATCH 01/11] suppress scale when using zone sync on ratelimit policy
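When rate limit scaling and zone sync are both enabled, skip the scale
calculation and log a warning instead: a synced limit_req zone already
shares its counters across all replicas, so additionally dividing the
rate per replica would apply the adjustment twice. Both the Ingress
annotation path and the RateLimit policy path now emit a warning of
this form (namespace/name illustrative, taken from the test fixtures):

    Policy default/rate-limit-policy: both zone sync and rate limit
    scale are enabled; the rate limit scale value will not be used.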
---
internal/configs/ingress.go | 7 +-
internal/configs/ingress_test.go | 8 +-
internal/configs/virtualserver.go | 61 ++++++++++-----
internal/configs/virtualserver_test.go | 102 +++++++++++++++++++++++--
tests/suite/test_rl_policies_vsr.py | 81 ++++++++++++++++++++
5 files changed, 228 insertions(+), 31 deletions(-)
diff --git a/internal/configs/ingress.go b/internal/configs/ingress.go
index 456c77be31..778a50b2ed 100644
--- a/internal/configs/ingress.go
+++ b/internal/configs/ingress.go
@@ -284,7 +284,12 @@ func generateNginxCfg(p NginxCfgParams) (version1.IngressNginxConfig, Warnings)
if !limitReqZoneExists(limitReqZones, zoneName) {
rate := cfgParams.LimitReqRate
if cfgParams.LimitReqScale && p.ingressControllerReplicas > 0 {
- rate = scaleRatelimit(rate, p.ingressControllerReplicas)
+ if p.ingEx.ZoneSync {
+ warningText := fmt.Sprintf("Ingress %s/%s: both zone sync and rate limit scale are enabled, the rate limit scale value will not be used.", p.ingEx.Ingress.Namespace, p.ingEx.Ingress.Name)
+ nl.Warn(l, warningText)
+ } else {
+ rate = scaleRatelimit(rate, p.ingressControllerReplicas)
+ }
}
limitReqZones = append(limitReqZones, version1.LimitReqZone{
Name: zoneName,
diff --git a/internal/configs/ingress_test.go b/internal/configs/ingress_test.go
index d5576cc653..1f638e67ae 100644
--- a/internal/configs/ingress_test.go
+++ b/internal/configs/ingress_test.go
@@ -1337,14 +1337,11 @@ func TestGenerateNginxCfgForLimitReqWithScaling(t *testing.T) {
for _, server := range result.Servers {
for _, location := range server.Locations {
if !reflect.DeepEqual(location.LimitReq, expectedReqs) {
- t.Errorf("generateNginxCfg returned \n%v, but expected \n%v", result.LimitReqZones, expectedZones)
+ t.Errorf("generateNginxCfg returned \n%v, but expected \n%v", location.LimitReq, expectedReqs)
}
}
}
- if !reflect.DeepEqual(result.LimitReqZones, expectedZones) {
- t.Errorf("generateNginxCfg returned \n%v, but expected \n%v", result.LimitReqZones, expectedZones)
- }
if len(warnings) != 0 {
t.Errorf("generateNginxCfg returned warnings: %v", warnings)
}
@@ -1431,9 +1428,6 @@ func TestGenerateNginxCfgForMergeableIngressesForLimitReqWithScaling(t *testing.
}
}
- if !reflect.DeepEqual(result.LimitReqZones, expectedZones) {
- t.Errorf("generateNginxCfg returned \n%v, but expected \n%v", result.LimitReqZones, expectedZones)
- }
if len(warnings) != 0 {
t.Errorf("generateNginxCfg returned warnings: %v", warnings)
}
diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go
index afdcc4a00d..d5f7369c54 100644
--- a/internal/configs/virtualserver.go
+++ b/internal/configs/virtualserver.go
@@ -1025,6 +1025,10 @@ func newValidationResults() *validationResults {
return &validationResults{}
}
+func (v *validationResults) addWarning(msg string) {
+ v.warnings = append(v.warnings, msg)
+}
+
func (v *validationResults) addWarningf(msgFmt string, args ...interface{}) {
v.warnings = append(v.warnings, fmt.Sprintf(msgFmt, args...))
}
@@ -1042,24 +1046,30 @@ func (p *policiesCfg) addAccessControlConfig(accessControl *conf_v1.AccessContro
}
func (p *policiesCfg) addRateLimitConfig(
- rateLimit *conf_v1.RateLimit,
- polKey string,
- polNamespace string,
- polName string,
+ policy *conf_v1.Policy,
ownerDetails policyOwnerDetails,
podReplicas int,
zoneSync bool,
) *validationResults {
res := newValidationResults()
+ rateLimit := policy.Spec.RateLimit
+ polKey := fmt.Sprintf("%v/%v", policy.Namespace, policy.Name)
- rlZoneName := rfc1123ToSnake(fmt.Sprintf("pol_rl_%v_%v_%v_%v", polNamespace, polName, ownerDetails.vsNamespace, ownerDetails.vsName))
+ rlZoneName := rfc1123ToSnake(fmt.Sprintf("pol_rl_%v_%v_%v_%v", policy.Namespace, policy.Name, ownerDetails.vsNamespace, ownerDetails.vsName))
if rateLimit.Condition != nil && rateLimit.Condition.JWT.Claim != "" && rateLimit.Condition.JWT.Match != "" {
- lrz := generateGroupedLimitReqZone(rlZoneName, rateLimit, podReplicas, ownerDetails, zoneSync)
+ lrz, warningText := generateGroupedLimitReqZone(rlZoneName, policy, podReplicas, ownerDetails, zoneSync)
+ if warningText != "" {
+ res.addWarning(warningText)
+ }
p.RateLimit.PolicyGroupMaps = append(p.RateLimit.PolicyGroupMaps, *generateLRZPolicyGroupMap(lrz))
p.RateLimit.AuthJWTClaimSets = append(p.RateLimit.AuthJWTClaimSets, generateAuthJwtClaimSet(*rateLimit.Condition.JWT, ownerDetails))
p.RateLimit.Zones = append(p.RateLimit.Zones, lrz)
} else {
- p.RateLimit.Zones = append(p.RateLimit.Zones, generateLimitReqZone(rlZoneName, rateLimit, podReplicas, zoneSync))
+ lrz, warningText := generateLimitReqZone(rlZoneName, policy, podReplicas, zoneSync)
+ if warningText != "" {
+ res.addWarning(warningText)
+ }
+ p.RateLimit.Zones = append(p.RateLimit.Zones, lrz)
}
p.RateLimit.Reqs = append(p.RateLimit.Reqs, generateLimitReq(rlZoneName, rateLimit))
@@ -1649,6 +1659,7 @@ func (vsc *virtualServerConfigurator) generatePolicies(
policyOpts policyOptions,
) policiesCfg {
config := newPoliciesConfig(vsc.bundleValidator)
+ l := nl.LoggerFromContext(vsc.cfgParams.Context)
for _, p := range policyRefs {
polNamespace := p.Namespace
@@ -1665,14 +1676,14 @@ func (vsc *virtualServerConfigurator) generatePolicies(
res = config.addAccessControlConfig(pol.Spec.AccessControl)
case pol.Spec.RateLimit != nil:
res = config.addRateLimitConfig(
- pol.Spec.RateLimit,
- key,
- polNamespace,
- p.Name,
+ pol,
ownerDetails,
vsc.IngressControllerReplicas,
policyOpts.zoneSync,
)
+ for _, msg := range res.warnings {
+ nl.Warn(l, msg)
+ }
case pol.Spec.JWTAuth != nil:
res = config.addJWTAuthConfig(pol.Spec.JWTAuth, key, polNamespace, policyOpts.secretRefs)
case pol.Spec.BasicAuth != nil:
@@ -1747,10 +1758,17 @@ func generateLimitReq(zoneName string, rateLimitPol *conf_v1.RateLimit) version2
return limitReq
}
-func generateLimitReqZone(zoneName string, rateLimitPol *conf_v1.RateLimit, podReplicas int, zoneSync bool) version2.LimitReqZone {
+func generateLimitReqZone(zoneName string, policy *conf_v1.Policy, podReplicas int, zoneSync bool) (version2.LimitReqZone, string) {
+ rateLimitPol := policy.Spec.RateLimit
rate := rateLimitPol.Rate
+ warningText := ""
+
if rateLimitPol.Scale {
- rate = scaleRatelimit(rateLimitPol.Rate, podReplicas)
+ if zoneSync {
+ warningText = fmt.Sprintf("Policy %s/%s: both zone sync and rate limit scale are enabled, the rate limit scale value will not be used.", policy.Namespace, policy.Name)
+ } else {
+ rate = scaleRatelimit(rateLimitPol.Rate, podReplicas)
+ }
}
return version2.LimitReqZone{
ZoneName: zoneName,
@@ -1758,18 +1776,25 @@ func generateLimitReqZone(zoneName string, rateLimitPol *conf_v1.RateLimit, podR
ZoneSize: rateLimitPol.ZoneSize,
Rate: rate,
Sync: zoneSync,
- }
+ }, warningText
}
func generateGroupedLimitReqZone(zoneName string,
- rateLimitPol *conf_v1.RateLimit,
+ policy *conf_v1.Policy,
podReplicas int,
ownerDetails policyOwnerDetails,
zoneSync bool,
-) version2.LimitReqZone {
+) (version2.LimitReqZone, string) {
+ rateLimitPol := policy.Spec.RateLimit
rate := rateLimitPol.Rate
+ warningText := ""
+
if rateLimitPol.Scale {
- rate = scaleRatelimit(rateLimitPol.Rate, podReplicas)
+ if zoneSync {
+ warningText = fmt.Sprintf("Policy %s/%s: both zone sync and rate limit scale are enabled, the rate limit scale value will not be used.", policy.Namespace, policy.Name)
+ } else {
+ rate = scaleRatelimit(rateLimitPol.Rate, podReplicas)
+ }
}
lrz := version2.LimitReqZone{
ZoneName: zoneName,
@@ -1801,7 +1826,7 @@ func generateGroupedLimitReqZone(zoneName string,
lrz.GroupSource = generateAuthJwtClaimSetVariable(rateLimitPol.Condition.JWT.Claim, ownerDetails.vsNamespace, ownerDetails.vsName)
}
- return lrz
+ return lrz, warningText
}
func generateLimitReqOptions(rateLimitPol *conf_v1.RateLimit) version2.LimitReqOptions {
diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go
index 1bd79225f9..c4c79506c2 100644
--- a/internal/configs/virtualserver_test.go
+++ b/internal/configs/virtualserver_test.go
@@ -4,6 +4,8 @@ import (
"context"
"errors"
"fmt"
+ "io"
+ "log/slog"
"reflect"
"sort"
"strings"
@@ -12,6 +14,9 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/nginx/kubernetes-ingress/internal/configs/version2"
"github.com/nginx/kubernetes-ingress/internal/k8s/secrets"
+ nl "github.com/nginx/kubernetes-ingress/internal/logger"
+ nic_glog "github.com/nginx/kubernetes-ingress/internal/logger/glog"
+ "github.com/nginx/kubernetes-ingress/internal/logger/levels"
"github.com/nginx/kubernetes-ingress/internal/nginx"
conf_v1 "github.com/nginx/kubernetes-ingress/pkg/apis/configuration/v1"
api_v1 "k8s.io/api/core/v1"
@@ -6448,6 +6453,10 @@ func TestGenerateVirtualServerConfigRateLimit(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$binary_remote_addr",
@@ -6594,6 +6603,10 @@ func TestGenerateVirtualServerConfigRateLimit(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$binary_remote_addr",
@@ -6788,6 +6801,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/premium-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "premium-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -6803,6 +6820,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
},
"default/basic-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "basic-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7021,6 +7042,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/premium-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "premium-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7036,6 +7061,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
},
"default/basic-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "basic-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7251,6 +7280,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/premium-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "premium-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7266,6 +7299,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
},
"default/basic-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "basic-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7487,6 +7524,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/premium-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "premium-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7502,6 +7543,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
},
"default/basic-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "basic-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7708,6 +7753,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/premium-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "premium-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7723,6 +7772,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
},
"default/basic-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "basic-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7974,6 +8027,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
Policies: map[string]*conf_v1.Policy{
"default/premium-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "premium-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -7989,6 +8046,10 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
},
"default/basic-rate-limit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "basic-rate-limit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "$jwt_claim_sub",
@@ -8519,7 +8580,7 @@ func TestGeneratePolicies(t *testing.T) {
mTLSCertAndCrlPath := fmt.Sprintf("%s %s", mTLSCertPath, mTLSCrlPath)
policyOpts := policyOptions{
tls: true,
- zoneSync: true,
+ zoneSync: false,
secretRefs: map[string]*secrets.SecretReference{
"default/ingress-mtls-secret": {
Secret: &api_v1.Secret{
@@ -8688,6 +8749,10 @@ func TestGeneratePolicies(t *testing.T) {
},
policies: map[string]*conf_v1.Policy{
"default/rateLimit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test",
@@ -8711,7 +8776,6 @@ func TestGeneratePolicies(t *testing.T) {
ZoneSize: "10M",
Rate: "10r/s",
ZoneName: "pol_rl_default_rateLimit_policy_default_test",
- Sync: true,
},
},
Options: version2.LimitReqOptions{
@@ -8735,6 +8799,10 @@ func TestGeneratePolicies(t *testing.T) {
},
policies: map[string]*conf_v1.Policy{
"default/rateLimit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test",
@@ -8744,6 +8812,10 @@ func TestGeneratePolicies(t *testing.T) {
},
},
"default/rateLimit-policy2": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy2",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test2",
@@ -8761,14 +8833,12 @@ func TestGeneratePolicies(t *testing.T) {
ZoneSize: "10M",
Rate: "10r/s",
ZoneName: "pol_rl_default_rateLimit_policy_default_test",
- Sync: true,
},
{
Key: "test2",
ZoneSize: "20M",
Rate: "20r/s",
ZoneName: "pol_rl_default_rateLimit_policy2_default_test",
- Sync: true,
},
},
Options: version2.LimitReqOptions{
@@ -8796,6 +8866,10 @@ func TestGeneratePolicies(t *testing.T) {
},
policies: map[string]*conf_v1.Policy{
"default/rateLimitScale-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimitScale-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test",
@@ -8815,7 +8889,6 @@ func TestGeneratePolicies(t *testing.T) {
ZoneSize: "10M",
Rate: "5r/s",
ZoneName: "pol_rl_default_rateLimitScale_policy_default_test",
- Sync: true,
},
},
Options: version2.LimitReqOptions{
@@ -9507,6 +9580,10 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
policies: map[string]*conf_v1.Policy{
"default/rateLimit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test",
@@ -9516,6 +9593,10 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
},
"default/rateLimit-policy2": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy2",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test2",
@@ -9582,6 +9663,10 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
policies: map[string]*conf_v1.Policy{
"default/rateLimit-policy": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test",
@@ -9598,6 +9683,10 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
},
"default/rateLimit-policy2": {
+ ObjectMeta: meta_v1.ObjectMeta{
+ Name: "rateLimit-policy2",
+ Namespace: "default",
+ },
Spec: conf_v1.PolicySpec{
RateLimit: &conf_v1.RateLimit{
Key: "test2",
@@ -17816,7 +17905,10 @@ func TestGenerateTimeWithDefault(t *testing.T) {
}
var (
+ l = slog.New(nic_glog.New(io.Discard, &nic_glog.Options{Level: levels.LevelInfo}))
+ ctx = nl.ContextWithLogger(context.Background(), l)
baseCfgParams = ConfigParams{
+ Context: ctx,
ServerTokens: "off",
Keepalive: 16,
ServerSnippets: []string{"# server snippet"},
diff --git a/tests/suite/test_rl_policies_vsr.py b/tests/suite/test_rl_policies_vsr.py
index 50e1753fd8..09a8504270 100644
--- a/tests/suite/test_rl_policies_vsr.py
+++ b/tests/suite/test_rl_policies_vsr.py
@@ -504,6 +504,87 @@ def test_rl_policy_5rs_with_zone_sync_vsr(
)
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
+ @pytest.mark.skip_for_nginx_oss
+ @pytest.mark.parametrize("src", [rl_vsr_sec_src])
+ def test_rl_policy_5rs_with_zone_sync_vsr(
+ self,
+ kube_apis,
+ ingress_controller_prerequisites,
+ ingress_controller_endpoint,
+ crd_ingress_controller,
+ v_s_route_app_setup,
+ v_s_route_setup,
+ test_namespace,
+ src,
+ ):
+ """
+ Test that pods are scaled to 3, zone sync is enabled, and the policy zone is synced
+ """
+ replica_count = 3
+ NGINX_API_VERSION = 9
+ pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_sec_src)
+
+ configmap_name = "nginx-config"
+
+ print("Step 1: apply minimal zone_sync nginx-config map")
+ replace_configmap_from_yaml(
+ kube_apis.v1,
+ configmap_name,
+ ingress_controller_prerequisites.namespace,
+ f"{TEST_DATA}/zone-sync/configmap-with-zonesync-minimal.yaml",
+ )
+
+ print("Step 2: apply the policy to the virtual server route")
+ apply_and_assert_valid_vsr(
+ kube_apis,
+ v_s_route_setup.route_m.namespace,
+ v_s_route_setup.route_m.name,
+ src,
+ )
+
+ print(f"Step 3: scale deployments to {replica_count}")
+ scale_deployment(
+ kube_apis.v1,
+ kube_apis.apps_v1_api,
+ "nginx-ingress",
+ ingress_controller_prerequisites.namespace,
+ replica_count,
+ )
+
+ wait_before_test()
+
+ print("Step 4: check if pods are ready")
+ wait_until_all_pods_are_ready(kube_apis.v1, ingress_controller_prerequisites.namespace)
+
+ print("Step 5: check plus api for zone sync")
+ api_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.api_port}"
+
+ stream_url = f"{api_url}/api/{NGINX_API_VERSION}/stream"
+ assert wait_for_zone_sync_enabled(stream_url)
+
+ zone_sync_url = f"{stream_url}/zone_sync"
+ assert wait_for_zone_sync_nodes_online(zone_sync_url, replica_count)
+
+ print("Step 6: check plus api if zone is synced")
+ assert check_synced_zone_exists(zone_sync_url, pol_name.replace("-", "_", -1))
+
+ # revert changes
+ scale_deployment(
+ kube_apis.v1,
+ kube_apis.apps_v1_api,
+ "nginx-ingress",
+ ingress_controller_prerequisites.namespace,
+ 1,
+ )
+ self.restore_default_vsr(kube_apis, v_s_route_setup)
+ replace_configmap_from_yaml(
+ kube_apis.v1,
+ configmap_name,
+ ingress_controller_prerequisites.namespace,
+ f"{TEST_DATA}/zone-sync/default-configmap.yaml",
+ )
+ delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
+
@pytest.mark.skip_for_nginx_oss
@pytest.mark.parametrize("src", [rl_vsr_jwt_claim_sub_src])
def test_rl_policy_jwt_claim_sub_vsr(
From 075d22cd0dfded3b80c2e317952db5382beb131b Mon Sep 17 00:00:00 2001
From: Alex Fenlon
Date: Thu, 13 Mar 2025 16:29:58 +0000
Subject: [PATCH 02/11] remove warning event from vs
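The zone-sync/scale warning was being returned through
validationResults, which generatePolicies records as a warning event
on the VirtualServer. Log it directly from addRateLimitConfig instead:
policiesCfg now carries the configurator's Context so the logger can
be recovered there, and the now-unused addWarning helper is dropped.
The policy tests switch from cmp.Equal to reflect.DeepEqual because
the expected policiesCfg values now embed a context.Context, which
go-cmp cannot compare without extra options; cmp.Diff is kept only for
failure output.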
---
internal/configs/virtualserver.go | 15 ++--
internal/configs/virtualserver_test.go | 67 +++++++++++-----
tests/suite/test_rl_policies.py | 102 ++++++++++++++++++++++++-
tests/suite/test_rl_policies_vsr.py | 28 ++++++-
4 files changed, 179 insertions(+), 33 deletions(-)
diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go
index d5f7369c54..e38e246437 100644
--- a/internal/configs/virtualserver.go
+++ b/internal/configs/virtualserver.go
@@ -956,6 +956,7 @@ type apiKeyAuth struct {
type policiesCfg struct {
Allow []string
+ Context context.Context
Deny []string
RateLimit rateLimit
JWTAuth jwtAuth
@@ -1025,10 +1026,6 @@ func newValidationResults() *validationResults {
return &validationResults{}
}
-func (v *validationResults) addWarning(msg string) {
- v.warnings = append(v.warnings, msg)
-}
-
func (v *validationResults) addWarningf(msgFmt string, args ...interface{}) {
v.warnings = append(v.warnings, fmt.Sprintf(msgFmt, args...))
}
@@ -1054,12 +1051,13 @@ func (p *policiesCfg) addRateLimitConfig(
res := newValidationResults()
rateLimit := policy.Spec.RateLimit
polKey := fmt.Sprintf("%v/%v", policy.Namespace, policy.Name)
+ l := nl.LoggerFromContext(p.Context)
rlZoneName := rfc1123ToSnake(fmt.Sprintf("pol_rl_%v_%v_%v_%v", policy.Namespace, policy.Name, ownerDetails.vsNamespace, ownerDetails.vsName))
if rateLimit.Condition != nil && rateLimit.Condition.JWT.Claim != "" && rateLimit.Condition.JWT.Match != "" {
lrz, warningText := generateGroupedLimitReqZone(rlZoneName, policy, podReplicas, ownerDetails, zoneSync)
if warningText != "" {
- res.addWarning(warningText)
+ nl.Warn(l, warningText)
}
p.RateLimit.PolicyGroupMaps = append(p.RateLimit.PolicyGroupMaps, *generateLRZPolicyGroupMap(lrz))
p.RateLimit.AuthJWTClaimSets = append(p.RateLimit.AuthJWTClaimSets, generateAuthJwtClaimSet(*rateLimit.Condition.JWT, ownerDetails))
@@ -1067,7 +1065,7 @@ func (p *policiesCfg) addRateLimitConfig(
} else {
lrz, warningText := generateLimitReqZone(rlZoneName, policy, podReplicas, zoneSync)
if warningText != "" {
- res.addWarning(warningText)
+ nl.Warn(l, warningText)
}
p.RateLimit.Zones = append(p.RateLimit.Zones, lrz)
}
@@ -1659,7 +1657,7 @@ func (vsc *virtualServerConfigurator) generatePolicies(
policyOpts policyOptions,
) policiesCfg {
config := newPoliciesConfig(vsc.bundleValidator)
- l := nl.LoggerFromContext(vsc.cfgParams.Context)
+ config.Context = vsc.cfgParams.Context
for _, p := range policyRefs {
polNamespace := p.Namespace
@@ -1681,9 +1679,6 @@ func (vsc *virtualServerConfigurator) generatePolicies(
vsc.IngressControllerReplicas,
policyOpts.zoneSync,
)
- for _, msg := range res.warnings {
- nl.Warn(l, msg)
- }
case pol.Spec.JWTAuth != nil:
res = config.addJWTAuthConfig(pol.Spec.JWTAuth, key, polNamespace, policyOpts.secretRefs)
case pol.Spec.BasicAuth != nil:
diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go
index c4c79506c2..958758e7c7 100644
--- a/internal/configs/virtualserver_test.go
+++ b/internal/configs/virtualserver_test.go
@@ -8686,7 +8686,8 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
- Allow: []string{"127.0.0.1"},
+ Allow: []string{"127.0.0.1"},
+ Context: ctx,
},
msg: "explicit reference",
},
@@ -8706,7 +8707,8 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
- Allow: []string{"127.0.0.1"},
+ Allow: []string{"127.0.0.1"},
+ Context: ctx,
},
msg: "implicit reference",
},
@@ -8736,7 +8738,8 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
- Allow: []string{"127.0.0.1", "127.0.0.2"},
+ Allow: []string{"127.0.0.1", "127.0.0.2"},
+ Context: ctx,
},
msg: "merging",
},
@@ -8764,6 +8767,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
RateLimit: rateLimit{
Reqs: []version2.LimitReq{
{
@@ -8826,6 +8830,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
RateLimit: rateLimit{
Zones: []version2.LimitReqZone{
{
@@ -8882,6 +8887,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
RateLimit: rateLimit{
Zones: []version2.LimitReqZone{
{
@@ -8926,6 +8932,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
JWTAuth: jwtAuth{
Auth: &version2.JWTAuth{
Secret: "/etc/nginx/secrets/default-jwt-secret",
@@ -8959,6 +8966,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
JWTAuth: jwtAuth{
Auth: &version2.JWTAuth{
Key: "default/jwt-policy-2",
@@ -8999,6 +9007,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
JWTAuth: jwtAuth{
Auth: &version2.JWTAuth{
Key: "default/jwt-policy-2",
@@ -9038,6 +9047,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
BasicAuth: &version2.BasicAuth{
Secret: "/etc/nginx/secrets/default-htpasswd-secret",
Realm: "My Test API",
@@ -9068,6 +9078,7 @@ func TestGeneratePolicies(t *testing.T) {
},
context: "spec",
expected: policiesCfg{
+ Context: ctx,
IngressMTLS: &version2.IngressMTLS{
ClientCert: mTLSCertPath,
VerifyClient: "off",
@@ -9099,6 +9110,7 @@ func TestGeneratePolicies(t *testing.T) {
},
context: "spec",
expected: policiesCfg{
+ Context: ctx,
IngressMTLS: &version2.IngressMTLS{
ClientCert: mTLSCertPath,
ClientCrl: mTLSCrlPath,
@@ -9132,6 +9144,7 @@ func TestGeneratePolicies(t *testing.T) {
},
context: "spec",
expected: policiesCfg{
+ Context: ctx,
IngressMTLS: &version2.IngressMTLS{
ClientCert: mTLSCertPath,
ClientCrl: mTLSCrlPath,
@@ -9162,6 +9175,7 @@ func TestGeneratePolicies(t *testing.T) {
},
context: "route",
expected: policiesCfg{
+ Context: ctx,
EgressMTLS: &version2.EgressMTLS{
Certificate: "/etc/nginx/secrets/default-egress-mtls-secret",
CertificateKey: "/etc/nginx/secrets/default-egress-mtls-secret",
@@ -9198,6 +9212,7 @@ func TestGeneratePolicies(t *testing.T) {
},
context: "route",
expected: policiesCfg{
+ Context: ctx,
EgressMTLS: &version2.EgressMTLS{
Certificate: "/etc/nginx/secrets/default-egress-mtls-secret",
CertificateKey: "/etc/nginx/secrets/default-egress-mtls-secret",
@@ -9244,7 +9259,8 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
- OIDC: true,
+ Context: ctx,
+ OIDC: true,
},
msg: "oidc reference",
},
@@ -9273,6 +9289,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
APIKey: apiKeyAuth{
Key: &version2.APIKey{
Header: []string{"X-API-Key"},
@@ -9316,6 +9333,7 @@ func TestGeneratePolicies(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
APIKey: apiKeyAuth{
Key: &version2.APIKey{
Header: []string{"X-API-Key"},
@@ -9358,6 +9376,7 @@ func TestGeneratePolicies(t *testing.T) {
},
context: "route",
expected: policiesCfg{
+ Context: ctx,
WAF: &version2.WAF{
Enable: "on",
ApPolicy: "/etc/nginx/waf/nac-policies/default-dataguard-alarm",
@@ -9369,7 +9388,7 @@ func TestGeneratePolicies(t *testing.T) {
},
}
- vsc := newVirtualServerConfigurator(&ConfigParams{Context: context.Background()}, false, false, &StaticConfigParams{}, false, &fakeBV)
+ vsc := newVirtualServerConfigurator(&ConfigParams{Context: ctx}, false, false, &StaticConfigParams{}, false, &fakeBV)
// required to test the scaling of the ratelimit
vsc.IngressControllerReplicas = 2
@@ -9378,7 +9397,7 @@ func TestGeneratePolicies(t *testing.T) {
result := vsc.generatePolicies(ownerDetails, tc.policyRefs, tc.policies, tc.context, policyOpts)
result.BundleValidator = nil
- if !cmp.Equal(tc.expected, result) {
+ if !reflect.DeepEqual(tc.expected, result) {
t.Error(cmp.Diff(tc.expected, result))
}
if len(vsc.warnings) > 0 {
@@ -9426,6 +9445,7 @@ func TestGeneratePolicies_GeneratesWAFPolicyOnValidApBundle(t *testing.T) {
},
context: "route",
want: policiesCfg{
+ Context: ctx,
WAF: &version2.WAF{
Enable: "on",
ApBundle: "/fake/bundle/path/testWAFPolicyBundle.tgz",
@@ -9458,6 +9478,7 @@ func TestGeneratePolicies_GeneratesWAFPolicyOnValidApBundle(t *testing.T) {
},
context: "route",
want: policiesCfg{
+ Context: ctx,
WAF: &version2.WAF{
Enable: "on",
ApBundle: "/fake/bundle/path/testWAFPolicyBundle.tgz",
@@ -9469,10 +9490,10 @@ func TestGeneratePolicies_GeneratesWAFPolicyOnValidApBundle(t *testing.T) {
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- vsc := newVirtualServerConfigurator(&ConfigParams{Context: context.Background()}, false, false, &StaticConfigParams{}, false, &fakeBV)
+ vsc := newVirtualServerConfigurator(&ConfigParams{Context: ctx}, false, false, &StaticConfigParams{}, false, &fakeBV)
res := vsc.generatePolicies(ownerDetails, tc.policyRefs, tc.policies, tc.context, policyOptions{apResources: &appProtectResourcesForVS{}})
res.BundleValidator = nil
- if !cmp.Equal(tc.want, res) {
+ if !reflect.DeepEqual(tc.want, res) {
t.Error(cmp.Diff(tc.want, res))
}
})
@@ -9556,8 +9577,9 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
policyOpts: policyOptions{},
expected: policiesCfg{
- Allow: []string{"127.0.0.1"},
- Deny: []string{"127.0.0.2"},
+ Context: ctx,
+ Allow: []string{"127.0.0.1"},
+ Deny: []string{"127.0.0.2"},
},
expectedWarnings: Warnings{
nil: {
@@ -9611,6 +9633,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
policyOpts: policyOptions{},
expected: policiesCfg{
+ Context: ctx,
RateLimit: rateLimit{
Zones: []version2.LimitReqZone{
{
@@ -9858,6 +9881,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
JWTAuth: jwtAuth{
Auth: &version2.JWTAuth{
Secret: "/etc/nginx/secrets/default-jwt-secret",
@@ -10014,6 +10038,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
},
expected: policiesCfg{
+ Context: ctx,
BasicAuth: &version2.BasicAuth{
Secret: "/etc/nginx/secrets/default-htpasswd-secret",
Realm: "test",
@@ -10157,6 +10182,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
context: "spec",
expected: policiesCfg{
+ Context: ctx,
IngressMTLS: &version2.IngressMTLS{
ClientCert: ingressMTLSCertPath,
VerifyClient: "on",
@@ -10298,6 +10324,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
context: "spec",
expected: policiesCfg{
+ Context: ctx,
IngressMTLS: &version2.IngressMTLS{
ClientCert: ingressMTLSCertPath,
ClientCrl: ingressMTLSCrlPath,
@@ -10361,6 +10388,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
context: "route",
expected: policiesCfg{
+ Context: ctx,
EgressMTLS: &version2.EgressMTLS{
Certificate: "/etc/nginx/secrets/default-egress-mtls-secret",
CertificateKey: "/etc/nginx/secrets/default-egress-mtls-secret",
@@ -10815,7 +10843,8 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
context: "route",
expected: policiesCfg{
- OIDC: true,
+ Context: ctx,
+ OIDC: true,
},
expectedWarnings: Warnings{
nil: {
@@ -11095,6 +11124,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
},
context: "route",
expected: policiesCfg{
+ Context: ctx,
WAF: &version2.WAF{
Enable: "on",
ApPolicy: "/etc/nginx/waf/nac-policies/default-dataguard-alarm",
@@ -11112,7 +11142,7 @@ func TestGeneratePoliciesFails(t *testing.T) {
for _, test := range tests {
t.Run(test.msg, func(t *testing.T) {
- vsc := newVirtualServerConfigurator(&ConfigParams{Context: context.Background()}, false, false, &StaticConfigParams{}, false, &fakeBV)
+ vsc := newVirtualServerConfigurator(&ConfigParams{Context: ctx}, false, false, &StaticConfigParams{}, false, &fakeBV)
if test.oidcPolCfg != nil {
vsc.oidcPolCfg = test.oidcPolCfg
@@ -11120,8 +11150,9 @@ func TestGeneratePoliciesFails(t *testing.T) {
result := vsc.generatePolicies(ownerDetails, test.policyRefs, test.policies, test.context, test.policyOpts)
result.BundleValidator = nil
- if diff := cmp.Diff(test.expected, result); diff != "" {
- t.Errorf("generatePolicies() '%v' mismatch (-want +got):\n%s", test.msg, diff)
+
+ if !reflect.DeepEqual(test.expected, result) {
+ t.Errorf("generatePolicies() '%v' mismatch (-want +got):\n%s", test.msg, cmp.Diff(test.expected, result))
}
if !reflect.DeepEqual(vsc.warnings, test.expectedWarnings) {
t.Errorf(
@@ -11131,11 +11162,11 @@ func TestGeneratePoliciesFails(t *testing.T) {
test.msg,
)
}
- if diff := cmp.Diff(test.expectedOidc.oidc, vsc.oidcPolCfg.oidc); diff != "" {
- t.Errorf("generatePolicies() '%v' mismatch (-want +got):\n%s", test.msg, diff)
+ if !reflect.DeepEqual(test.expectedOidc.oidc, vsc.oidcPolCfg.oidc) {
+ t.Errorf("generatePolicies() '%v' mismatch (-want +got):\n%s", test.msg, cmp.Diff(test.expectedOidc.oidc, vsc.oidcPolCfg.oidc))
}
- if diff := cmp.Diff(test.expectedOidc.key, vsc.oidcPolCfg.key); diff != "" {
- t.Errorf("generatePolicies() '%v' mismatch (-want +got):\n%s", test.msg, diff)
+ if !reflect.DeepEqual(test.expectedOidc.key, vsc.oidcPolCfg.key) {
+ t.Errorf("generatePolicies() '%v' mismatch (-want +got):\n%s", test.msg, cmp.Diff(test.expectedOidc.key, vsc.oidcPolCfg.key))
}
})
}
diff --git a/tests/suite/test_rl_policies.py b/tests/suite/test_rl_policies.py
index 7d39ad402d..0b17cdb7f3 100644
--- a/tests/suite/test_rl_policies.py
+++ b/tests/suite/test_rl_policies.py
@@ -10,8 +10,14 @@
wait_for_zone_sync_enabled,
wait_for_zone_sync_nodes_online,
)
-from suite.utils.policy_resources_utils import apply_and_assert_valid_policy, create_policy_from_yaml, delete_policy
+from suite.utils.policy_resources_utils import (
+ apply_and_assert_valid_policy,
+ create_policy_from_yaml,
+ delete_policy,
+ read_policy,
+)
from suite.utils.resources_utils import (
+ get_first_pod_name,
get_pod_list,
get_vs_nginx_template_conf,
replace_configmap_from_yaml,
@@ -457,6 +463,100 @@ def test_rl_policy_5rs_with_zone_sync(
)
delete_policy(kube_apis.custom_objects, pol_name, test_namespace)
+ @pytest.mark.skip_for_nginx_oss
+ @pytest.mark.parametrize("src", [rl_vs_pri_sca_src])
+ def test_rl_policy_with_scale_and_zone_sync(
+ self,
+ kube_apis,
+ crd_ingress_controller,
+ ingress_controller_prerequisites,
+ ingress_controller_endpoint,
+ virtual_server_setup,
+ test_namespace,
+ src,
+ ):
+ """
+ Test that pods are scaled to 3, zone sync is enabled, and the policy zone is synced
+ """
+ replica_count = 3
+ pol_name = apply_and_assert_valid_policy(kube_apis, test_namespace, rl_pol_pri_sca_src)
+
+ configmap_name = "nginx-config"
+
+ print("Step 1: apply minimal zone_sync nginx-config map")
+ replace_configmap_from_yaml(
+ kube_apis.v1,
+ configmap_name,
+ ingress_controller_prerequisites.namespace,
+ f"{TEST_DATA}/zone-sync/configmap-with-zonesync-minimal.yaml",
+ )
+
+ print("Step 2: apply the policy to the virtual server")
+ # Patch VirtualServer
+ apply_and_assert_valid_vs(
+ kube_apis,
+ virtual_server_setup.namespace,
+ virtual_server_setup.vs_name,
+ src,
+ )
+
+ print(f"Step 3: scale deployments to {replica_count}")
+ scale_deployment(
+ kube_apis.v1,
+ kube_apis.apps_v1_api,
+ "nginx-ingress",
+ ingress_controller_prerequisites.namespace,
+ replica_count,
+ )
+
+ wait_before_test()
+
+ print("Step 4: check if pods are ready")
+ wait_until_all_pods_are_ready(kube_apis.v1, ingress_controller_prerequisites.namespace)
+
+ print("Step 5: check plus api for zone sync")
+ api_url = f"http://{ingress_controller_endpoint.public_ip}:{ingress_controller_endpoint.api_port}"
+
+ stream_url = f"{api_url}/api/{NGINX_API_VERSION}/stream"
+ assert wait_for_zone_sync_enabled(stream_url)
+
+ zone_sync_url = f"{stream_url}/zone_sync"
+ assert wait_for_zone_sync_nodes_online(zone_sync_url, replica_count)
+
+ print("Step 6: check plus api if zone is synced")
+ assert check_synced_zone_exists(zone_sync_url, pol_name.replace("-", "_", -1))
+
+ print("Step 7: check sync in config")
+ pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
+ vs_config = get_vs_nginx_template_conf(
+ kube_apis.v1,
+ virtual_server_setup.namespace,
+ virtual_server_setup.vs_name,
+ pod_name,
+ ingress_controller_prerequisites.namespace,
+ )
+
+ policy = read_policy(kube_apis.custom_objects, test_namespace, pol_name)
+ expected_conf_line = f"limit_req_zone {policy["spec"]["rateLimit"]["key"]} zone=pol_rl_{policy["metadata"]["namespace"].replace("-", "_", -1)}_{pol_name.replace("-", "_", -1)}_{virtual_server_setup.namespace.replace("-", "_", -1)}_{virtual_server_setup.vs_name.replace("-", "_", -1)}:{policy["spec"]["rateLimit"]["zoneSize"]} rate={policy["spec"]["rateLimit"]["rate"]} sync;"
+ assert expected_conf_line in vs_config
+
+ # revert changes
+ scale_deployment(
+ kube_apis.v1,
+ kube_apis.apps_v1_api,
+ "nginx-ingress",
+ ingress_controller_prerequisites.namespace,
+ 1,
+ )
+ self.restore_default_vs(kube_apis, virtual_server_setup)
+ replace_configmap_from_yaml(
+ kube_apis.v1,
+ configmap_name,
+ ingress_controller_prerequisites.namespace,
+ f"{TEST_DATA}/zone-sync/default-configmap.yaml",
+ )
+ delete_policy(kube_apis.custom_objects, pol_name, test_namespace)
+
@pytest.mark.skip_for_nginx_oss
@pytest.mark.parametrize("src", [rl_vs_jwt_claim_sub])
def test_rl_policy_jwt_claim_sub(
diff --git a/tests/suite/test_rl_policies_vsr.py b/tests/suite/test_rl_policies_vsr.py
index 09a8504270..243740ba64 100644
--- a/tests/suite/test_rl_policies_vsr.py
+++ b/tests/suite/test_rl_policies_vsr.py
@@ -10,8 +10,14 @@
wait_for_zone_sync_enabled,
wait_for_zone_sync_nodes_online,
)
-from suite.utils.policy_resources_utils import apply_and_assert_valid_policy, create_policy_from_yaml, delete_policy
+from suite.utils.policy_resources_utils import (
+ apply_and_assert_valid_policy,
+ create_policy_from_yaml,
+ delete_policy,
+ read_policy,
+)
from suite.utils.resources_utils import (
+ get_first_pod_name,
get_pod_list,
replace_configmap_from_yaml,
scale_deployment,
@@ -505,8 +511,8 @@ def test_rl_policy_5rs_with_zone_sync_vsr(
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
@pytest.mark.skip_for_nginx_oss
- @pytest.mark.parametrize("src", [rl_vsr_sec_src])
- def test_rl_policy_5rs_with_zone_sync_vsr(
+ @pytest.mark.parametrize("src", [rl_vsr_pri_sca_src])
+ def test_rl_policy_with_scale_and_zone_sync_vsr(
self,
kube_apis,
ingress_controller_prerequisites,
@@ -522,7 +528,7 @@ def test_rl_policy_5rs_with_zone_sync_vsr(
"""
replica_count = 3
NGINX_API_VERSION = 9
- pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_sec_src)
+ pol_name = apply_and_assert_valid_policy(kube_apis, v_s_route_setup.route_m.namespace, rl_pol_pri_sca_src)
configmap_name = "nginx-config"
@@ -568,6 +574,20 @@ def test_rl_policy_5rs_with_zone_sync_vsr(
print("Step 6: check plus api if zone is synced")
assert check_synced_zone_exists(zone_sync_url, pol_name.replace("-", "_", -1))
+ print("Step 7: check sync in config")
+ pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
+ vsr_config = get_vs_nginx_template_conf(
+ kube_apis.v1,
+ v_s_route_setup.namespace,
+ v_s_route_setup.vs_name,
+ pod_name,
+ ingress_controller_prerequisites.namespace,
+ )
+
+ policy = read_policy(kube_apis.custom_objects, v_s_route_setup.route_m.namespace, pol_name)
+ expected_conf_line = f"limit_req_zone {policy["spec"]["rateLimit"]["key"]} zone=pol_rl_{policy["metadata"]["namespace"].replace("-", "_", -1)}_{pol_name.replace("-", "_", -1)}_{v_s_route_setup.route_m.namespace.replace("-", "_", -1)}_{v_s_route_setup.vs_name.replace("-", "_", -1)}:{policy["spec"]["rateLimit"]["zoneSize"]} rate={policy["spec"]["rateLimit"]["rate"]} sync;"
+ assert expected_conf_line in vsr_config
+
# revert changes
scale_deployment(
kube_apis.v1,
From bb9f32215a1da0d9755bbde519b4076f95baf55e Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Fri, 14 Mar 2025 08:54:37 +0000
Subject: [PATCH 03/11] add ingress zonesync test
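Covers the ingress annotation path: enable zone sync through the
minimal zone-sync configmap, scale the controller, and assert the
rendered config declares a synced zone. The expected line is built
from the ingress annotations (placeholders shown here):

    limit_req_zone <key> zone=<namespace>/<ingress>:<zone-size> rate=<rate> sync;

Also adds a small read_ingress helper to resources_utils, wrapping
NetworkingV1Api.read_namespaced_ingress, so the test can read the
annotations back from the live object.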
---
tests/suite/test_rl_ingress.py | 54 +++++++++++++++++++++++++++-
tests/suite/utils/resources_utils.py | 14 ++++++++
2 files changed, 67 insertions(+), 1 deletion(-)
diff --git a/tests/suite/test_rl_ingress.py b/tests/suite/test_rl_ingress.py
index 04cf9ade09..2c6572338c 100644
--- a/tests/suite/test_rl_ingress.py
+++ b/tests/suite/test_rl_ingress.py
@@ -20,6 +20,7 @@
get_first_pod_name,
get_ingress_nginx_template_conf,
get_pod_list,
+ read_ingress,
replace_configmap_from_yaml,
scale_deployment,
wait_before_test,
@@ -158,8 +159,8 @@ def test_ingress_rate_limit_with_zone_sync(
f"{TEST_DATA}/zone-sync/configmap-with-zonesync-minimal.yaml",
)
+ print("Step 2: apply Ingress")
ingress_name = get_name_from_yaml(src)
-
create_items_from_yaml(kube_apis, src, test_namespace)
print(f"Step 3: scale deployments to {replica_count}")
@@ -239,3 +240,54 @@ def test_ingress_rate_limit_scaled(
assert flag
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1)
+
+
+@pytest.mark.annotations
+@pytest.mark.parametrize("annotations_setup", ["standard-scaled", "mergeable-scaled"], indirect=True)
+class TestRateLimitIngressScaledWithZoneSync:
+ def test_ingress_rate_limit_scaled_with_zone_sync(
+ self, kube_apis, annotations_setup, ingress_controller_prerequisites, test_namespace
+ ):
+ """
+ Test that rate-limit zone sync works with scaled standard and mergeable ingresses
+ """
+ print("Step 1: apply minimal zone_sync nginx-config map")
+ configmap_name = "nginx-config"
+ replace_configmap_from_yaml(
+ kube_apis.v1,
+ configmap_name,
+ ingress_controller_prerequisites.namespace,
+ f"{TEST_DATA}/zone-sync/configmap-with-zonesync-minimal.yaml",
+ )
+
+ print("Step 3: scale deployments to 2")
+ ns = ingress_controller_prerequisites.namespace
+ scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 2)
+
+ print("Step 4: check if pods are ready")
+ wait_until_all_pods_are_ready(kube_apis.v1, ingress_controller_prerequisites.namespace)
+
+ print("Step 5: check sync in config")
+ pod_name = get_first_pod_name(kube_apis.v1, ingress_controller_prerequisites.namespace)
+
+ ing_config = get_ingress_nginx_template_conf(
+ kube_apis.v1,
+ annotations_setup.namespace,
+ annotations_setup.ingress_name,
+ pod_name,
+ ingress_controller_prerequisites.namespace,
+ )
+ ingress_name = annotations_setup.ingress_name
+ ingress = read_ingress(kube_apis.networking_v1, ingress_name, annotations_setup.namespace)
+ key = ingress.metadata.annotations.get("nginx.org/limit-req-key")
+ rate = ingress.metadata.annotations.get("nginx.org/limit-req-rate")
+ zone_size = ingress.metadata.annotations.get("nginx.org/limit-req-zone-size")
+ expected_conf_line = (
+ f"limit_req_zone {key} zone={annotations_setup.namespace}/{ingress_name}:{zone_size} rate={rate} sync;"
+ )
+ assert expected_conf_line in ing_config
+
+ print("Step 6: clean up")
+ scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1)
diff --git a/tests/suite/utils/resources_utils.py b/tests/suite/utils/resources_utils.py
index e7f0948425..041d04d128 100644
--- a/tests/suite/utils/resources_utils.py
+++ b/tests/suite/utils/resources_utils.py
@@ -15,6 +15,7 @@
CoreV1Api,
NetworkingV1Api,
RbacAuthorizationV1Api,
+ V1Ingress,
V1ObjectMeta,
V1Secret,
V1Service,
@@ -2033,3 +2034,16 @@ def get_apikey_policy_details_from_yaml(yaml_manifest) -> dict:
details["queries"] = data["spec"]["apiKey"]["suppliedIn"]["query"]
return details
+
+
+def read_ingress(v1: NetworkingV1Api, name, namespace) -> V1Ingress:
+ """
+ Get details of an Ingress.
+
+ :param v1: NetworkingV1Api
+ :param name: ingress name
+ :param namespace: namespace name
+ :return: V1Ingress
+ """
+ print(f"Read an ingress named '{name}'")
+ return v1.read_namespaced_ingress(name, namespace)
From 74e04a5d66cd736ecef76b94fd96446809d29cb4 Mon Sep 17 00:00:00 2001
From: Alex Fenlon
Date: Fri, 14 Mar 2025 15:09:11 +0000
Subject: [PATCH 04/11] fix VS becoming invalid when applying zone-sync after
VS
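Enabling zone sync after a VirtualServer already exists was leaving
the VS invalid on reload. Rename the zone when sync is on, so the
synced zone is declared, and referenced, under a distinct name;
presumably the reload then creates a fresh shared zone instead of
mutating the definition of the existing unsynced one. With sync
enabled the template now renders lines like:

    limit_req_zone $url zone=pol_rl_test_test_test_sync:10m rate=10r/s sync;
    limit_req zone=pol_rl_test_test_test_sync burst=5 delay=10;

The new Sync field on version2.LimitReq carries the flag into the
location-level limit_req so both directives agree on the zone name.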
---
internal/configs/version2/http.go | 3 ++-
internal/configs/version2/nginx-plus.virtualserver.tmpl | 4 ++--
internal/configs/virtualserver.go | 3 +++
3 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/internal/configs/version2/http.go b/internal/configs/version2/http.go
index 818546b557..23d9cc93e0 100644
--- a/internal/configs/version2/http.go
+++ b/internal/configs/version2/http.go
@@ -408,10 +408,11 @@ type LimitReq struct {
Burst int
NoDelay bool
Delay int
+ Sync bool
}
func (rl LimitReq) String() string {
- return fmt.Sprintf("{ZoneName %q, Burst %q, NoDelay %v, Delay %q}", rl.ZoneName, rl.Burst, rl.NoDelay, rl.Delay)
+ return fmt.Sprintf("{ZoneName %q, Burst %q, NoDelay %v, Delay %q, Sync %v}", rl.ZoneName, rl.Burst, rl.NoDelay, rl.Delay, rl.Sync)
}
// LimitReqOptions defines rate limit options.
diff --git a/internal/configs/version2/nginx-plus.virtualserver.tmpl b/internal/configs/version2/nginx-plus.virtualserver.tmpl
index ee40851713..ed368a8ff1 100644
--- a/internal/configs/version2/nginx-plus.virtualserver.tmpl
+++ b/internal/configs/version2/nginx-plus.virtualserver.tmpl
@@ -67,7 +67,7 @@ map {{ $m.Source }} {{ $m.Variable }} {
{{- end }}
{{- range $z := .LimitReqZones }}
-limit_req_zone {{ $z.Key }} zone={{ $z.ZoneName }}:{{ $z.ZoneSize }} rate={{ $z.Rate }}{{- if $z.Sync }} sync{{- end }};
+limit_req_zone {{ $z.Key }} {{- if $z.Sync }} zone={{ $z.ZoneName }}_sync:{{ $z.ZoneSize }}{{ else }} zone={{ $z.ZoneName }}:{{ $z.ZoneSize }}{{- end }} rate={{ $z.Rate }}{{- if $z.Sync }} sync{{- end }};
{{- end }}
{{- range $m := .StatusMatches }}
@@ -202,7 +202,7 @@ server {
{{- end }}
{{- range $rl := $s.LimitReqs }}
- limit_req zone={{ $rl.ZoneName }}{{ if $rl.Burst }} burst={{ $rl.Burst }}{{ end }}
+ limit_req {{- if $rl.Sync }} zone={{ $rl.ZoneName }}_sync{{ else }} zone={{ $rl.ZoneName }}{{ end }}{{ if $rl.Burst }} burst={{ $rl.Burst }}{{ end }}
{{- if $rl.Delay }} delay={{ $rl.Delay }}{{ end }}{{ if $rl.NoDelay }} nodelay{{ end }};
{{- end }}
diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go
index e38e246437..a79bf05ad8 100644
--- a/internal/configs/virtualserver.go
+++ b/internal/configs/virtualserver.go
@@ -1071,6 +1071,9 @@ func (p *policiesCfg) addRateLimitConfig(
}
p.RateLimit.Reqs = append(p.RateLimit.Reqs, generateLimitReq(rlZoneName, rateLimit))
+ for i := range p.RateLimit.Zones {
+ p.RateLimit.Reqs[i].Sync = p.RateLimit.Zones[i].Sync
+ }
if len(p.RateLimit.Reqs) == 1 {
p.RateLimit.Options = generateLimitReqOptions(rateLimit)
} else {
From ed4e7306914ff75800026c155477b340e5f6275a Mon Sep 17 00:00:00 2001
From: Alex Fenlon
Date: Fri, 14 Mar 2025 17:18:42 +0000
Subject: [PATCH 05/11] fix ingress bug becoming invalid when applying
zone-sync after ingress and update tests
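Same fix as the VirtualServer side, applied to the ingress path:
version1.LimitReq gains a Sync flag driven by cfgParams.ZoneSync.Enable,
and the NGINX Plus ingress template declares and references the
suffixed zone name when sync is on, e.g.

    limit_req_zone ${binary_remote_addr} zone=default/zone1_sync:10m rate=200r/s sync;

Snapshots, template tests, and the python suites are updated to expect
the _sync name.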
---
internal/configs/ingress.go | 1 +
internal/configs/version1/__snapshots__/template_test.snap | 2 +-
internal/configs/version1/config.go | 1 +
internal/configs/version1/nginx-plus.ingress.tmpl | 4 ++--
internal/configs/version1/template_test.go | 2 +-
internal/configs/version2/__snapshots__/templates_test.snap | 2 +-
internal/configs/virtualserver_test.go | 6 +++---
pyproject.toml | 6 ++----
tests/suite/test_rl_ingress.py | 2 +-
tests/suite/test_rl_policies.py | 2 +-
tests/suite/test_rl_policies_vsr.py | 5 +++--
11 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/internal/configs/ingress.go b/internal/configs/ingress.go
index 778a50b2ed..b9d388a761 100644
--- a/internal/configs/ingress.go
+++ b/internal/configs/ingress.go
@@ -280,6 +280,7 @@ func generateNginxCfg(p NginxCfgParams) (version1.IngressNginxConfig, Warnings)
DryRun: cfgParams.LimitReqDryRun,
LogLevel: cfgParams.LimitReqLogLevel,
RejectCode: cfgParams.LimitReqRejectCode,
+ Sync: cfgParams.ZoneSync.Enable,
}
if !limitReqZoneExists(limitReqZones, zoneName) {
rate := cfgParams.LimitReqRate
diff --git a/internal/configs/version1/__snapshots__/template_test.snap b/internal/configs/version1/__snapshots__/template_test.snap
index 202e4dfb02..6924d6b8ef 100644
--- a/internal/configs/version1/__snapshots__/template_test.snap
+++ b/internal/configs/version1/__snapshots__/template_test.snap
@@ -1217,7 +1217,7 @@ server {
# configuration for default/myingress
-limit_req_zone ${binary_remote_addr} zone=default/zone1:10m rate=200r/s sync;
+limit_req_zone ${binary_remote_addr} zone=default/zone1_sync:10m rate=200r/s sync;
diff --git a/internal/configs/version1/config.go b/internal/configs/version1/config.go
index 5ff1ee1d3f..ba600cdd9b 100644
--- a/internal/configs/version1/config.go
+++ b/internal/configs/version1/config.go
@@ -161,6 +161,7 @@ type LimitReq struct {
RejectCode int
DryRun bool
LogLevel string
+ Sync bool
}
// Location describes an NGINX location.
diff --git a/internal/configs/version1/nginx-plus.ingress.tmpl b/internal/configs/version1/nginx-plus.ingress.tmpl
index c31517cc0a..7b6b576ced 100644
--- a/internal/configs/version1/nginx-plus.ingress.tmpl
+++ b/internal/configs/version1/nginx-plus.ingress.tmpl
@@ -22,7 +22,7 @@ upstream {{$upstream.Name}} {
{{- end}}
{{range $limitReqZone := .LimitReqZones}}
-limit_req_zone {{ $limitReqZone.Key }} zone={{ $limitReqZone.Name }}:{{$limitReqZone.Size}} rate={{$limitReqZone.Rate}}{{- if $limitReqZone.Sync }} sync{{- end }};
+limit_req_zone {{ $limitReqZone.Key }}{{- if $limitReqZone.Sync }} zone={{ $limitReqZone.Name }}_sync:{{$limitReqZone.Size}}{{ else }} zone={{ $limitReqZone.Name }}:{{$limitReqZone.Size}} {{- end }} rate={{$limitReqZone.Rate}}{{- if $limitReqZone.Sync }} sync{{- end }};
{{end}}
{{range $server := .Servers}}
@@ -321,7 +321,7 @@ server {
{{- end}}
{{with $location.LimitReq}}
- limit_req zone={{ $location.LimitReq.Zone }} {{if $location.LimitReq.Burst}}burst={{$location.LimitReq.Burst}}{{end}} {{if $location.LimitReq.NoDelay}}nodelay{{else if $location.LimitReq.Delay}}delay={{$location.LimitReq.Delay}}{{end}};
+ limit_req zone={{- if $location.LimitReq.Sync }}{{ $location.LimitReq.Zone }}_sync {{ else }}{{ $location.LimitReq.Zone }}{{- end }} {{if $location.LimitReq.Burst}}burst={{$location.LimitReq.Burst}}{{end}} {{if $location.LimitReq.NoDelay}}nodelay{{else if $location.LimitReq.Delay}}delay={{$location.LimitReq.Delay}}{{end}};
{{if $location.LimitReq.DryRun}}limit_req_dry_run on;{{end}}
{{if $location.LimitReq.LogLevel}}limit_req_log_level {{$location.LimitReq.LogLevel}};{{end}}
{{if $location.LimitReq.RejectCode}}limit_req_status {{$location.LimitReq.RejectCode}};{{end}}
diff --git a/internal/configs/version1/template_test.go b/internal/configs/version1/template_test.go
index 31df58882b..901cbe8f40 100644
--- a/internal/configs/version1/template_test.go
+++ b/internal/configs/version1/template_test.go
@@ -1850,7 +1850,7 @@ func TestExecuteTemplate_ForIngressForNGINXPlusWithRequestRateLimitZoneSync(t *t
ingConf := buf.String()
wantDirectives := []string{
- "limit_req_zone ${binary_remote_addr} zone=default/zone1:10m rate=200r/s sync;",
+ "limit_req_zone ${binary_remote_addr} zone=default/zone1_sync:10m rate=200r/s sync;",
}
for _, want := range wantDirectives {
diff --git a/internal/configs/version2/__snapshots__/templates_test.snap b/internal/configs/version2/__snapshots__/templates_test.snap
index 78bcf86e5d..404cda1aba 100644
--- a/internal/configs/version2/__snapshots__/templates_test.snap
+++ b/internal/configs/version2/__snapshots__/templates_test.snap
@@ -2913,7 +2913,7 @@ map $http_x_version $match_0_0 {
default 0;
}
# HTTP snippet
-limit_req_zone $url zone=pol_rl_test_test_test:10m rate=10r/s sync;
+limit_req_zone $url zone=pol_rl_test_test_test_sync:10m rate=10r/s sync;
server {
listen 80 proxy_protocol;
diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go
index 958758e7c7..ad1353fed3 100644
--- a/internal/configs/virtualserver_test.go
+++ b/internal/configs/virtualserver_test.go
@@ -6524,7 +6524,7 @@ func TestGenerateVirtualServerConfigRateLimit(t *testing.T) {
VSNamespace: "default",
VSName: "cafe",
LimitReqs: []version2.LimitReq{
- {ZoneName: "pol_rl_default_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0},
+ {ZoneName: "pol_rl_default_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0, Sync: true},
},
LimitReqOptions: version2.LimitReqOptions{
DryRun: false,
@@ -6959,8 +6959,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
VSNamespace: "default",
VSName: "cafe",
LimitReqs: []version2.LimitReq{
- {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0},
- {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0},
+ {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0, Sync: true},
+ {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0, Sync: true},
},
LimitReqOptions: version2.LimitReqOptions{
DryRun: false,
diff --git a/pyproject.toml b/pyproject.toml
index be6af3c374..67110769ca 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -92,8 +92,6 @@ markers = [
"vsr_upstream",
"watch_namespace",
"wildcard_tls",
+ "alex",
]
-testpaths = [
- "tests",
- "perf-tests",
-]
+testpaths = ["tests", "perf-tests"]
diff --git a/tests/suite/test_rl_ingress.py b/tests/suite/test_rl_ingress.py
index 2c6572338c..265ca7910e 100644
--- a/tests/suite/test_rl_ingress.py
+++ b/tests/suite/test_rl_ingress.py
@@ -285,7 +285,7 @@ def test_ingress_rate_limit_scaled_with_zone_sync(
rate = ingress.metadata.annotations.get("nginx.org/limit-req-rate")
zone_size = ingress.metadata.annotations.get("nginx.org/limit-req-zone-size")
expected_conf_line = (
- f"limit_req_zone {key} zone={annotations_setup.namespace}/{ingress_name}:{zone_size} rate={rate} sync;"
+ f"limit_req_zone {key} zone={annotations_setup.namespace}/{ingress_name}_sync:{zone_size} rate={rate} sync;"
)
assert expected_conf_line in ing_config
diff --git a/tests/suite/test_rl_policies.py b/tests/suite/test_rl_policies.py
index 0b17cdb7f3..b48bbaf479 100644
--- a/tests/suite/test_rl_policies.py
+++ b/tests/suite/test_rl_policies.py
@@ -537,7 +537,7 @@ def test_rl_policy_with_scale_and_zone_sync(
)
policy = read_policy(kube_apis.custom_objects, test_namespace, pol_name)
- expected_conf_line = f"limit_req_zone {policy["spec"]["rateLimit"]["key"]} zone=pol_rl_{policy["metadata"]["namespace"].replace("-", "_", -1)}_{pol_name.replace("-", "_", -1)}_{virtual_server_setup.namespace.replace("-", "_", -1)}_{virtual_server_setup.vs_name.replace("-", "_", -1)}:{policy["spec"]["rateLimit"]["zoneSize"]} rate={policy["spec"]["rateLimit"]["rate"]} sync;"
+ expected_conf_line = f"limit_req_zone {policy["spec"]["rateLimit"]["key"]} zone=pol_rl_{policy["metadata"]["namespace"].replace("-", "_", -1)}_{pol_name.replace("-", "_", -1)}_{virtual_server_setup.namespace.replace("-", "_", -1)}_{virtual_server_setup.vs_name.replace("-", "_", -1)}_sync:{policy["spec"]["rateLimit"]["zoneSize"]} rate={policy["spec"]["rateLimit"]["rate"]} sync;"
assert expected_conf_line in vs_config
# revert changes
diff --git a/tests/suite/test_rl_policies_vsr.py b/tests/suite/test_rl_policies_vsr.py
index 243740ba64..1eed783b39 100644
--- a/tests/suite/test_rl_policies_vsr.py
+++ b/tests/suite/test_rl_policies_vsr.py
@@ -511,6 +511,7 @@ def test_rl_policy_5rs_with_zone_sync_vsr(
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
@pytest.mark.skip_for_nginx_oss
+ @pytest.mark.alex # temp
@pytest.mark.parametrize("src", [rl_vsr_pri_sca_src])
def test_rl_policy_with_scale_and_zone_sync_vsr(
self,
@@ -541,7 +542,7 @@ def test_rl_policy_with_scale_and_zone_sync_vsr(
)
print("Step 2: apply the policy to the virtual server route")
- apply_and_assert_valid_vsr(
+ apply_and_assert_valid_vsr( # failing as it cannot find state?
kube_apis,
v_s_route_setup.route_m.namespace,
v_s_route_setup.route_m.name,
@@ -585,7 +586,7 @@ def test_rl_policy_with_scale_and_zone_sync_vsr(
)
policy = read_policy(kube_apis.custom_objects, v_s_route_setup.route_m.namespace, pol_name)
- expected_conf_line = f"limit_req_zone {policy["spec"]["rateLimit"]["key"]} zone=pol_rl_{policy["metadata"]["namespace"].replace("-", "_", -1)}_{pol_name.replace("-", "_", -1)}_{v_s_route_setup.route_m.namespace.replace("-", "_", -1)}_{v_s_route_setup.vs_name.replace("-", "_", -1)}:{policy["spec"]["rateLimit"]["zoneSize"]} rate={policy["spec"]["rateLimit"]["rate"]} sync;"
+ expected_conf_line = f"limit_req_zone {policy["spec"]["rateLimit"]["key"]} zone=pol_rl_{policy["metadata"]["namespace"].replace("-", "_", -1)}_{pol_name.replace("-", "_", -1)}_{v_s_route_setup.route_m.namespace.replace("-", "_", -1)}_{v_s_route_setup.vs_name.replace("-", "_", -1)}_sync:{policy["spec"]["rateLimit"]["zoneSize"]} rate={policy["spec"]["rateLimit"]["rate"]} sync;"
assert expected_conf_line in vsr_config
# revert changes
From 51976c7e9ced8131e24324ae031c5463a2696d47 Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Mon, 17 Mar 2025 10:02:03 +0000
Subject: [PATCH 06/11] updates to simplify zone name with sync
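Rather than branching on Sync in every template, fold the _sync suffix
into the generated zone name itself (in virtualserver.go) and revert
the template and version2.LimitReq.Sync changes from the earlier
patches: the virtualserver template goes back to the plain
zone={{ $z.ZoneName }} form while the snapshots carry the
pol_rl_..._sync names throughout. The template tests also gain a
separate virtualServerCfgPlus fixture for the NGINX Plus variant.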
---
.../__snapshots__/templates_test.snap | 4 +-
internal/configs/version2/http.go | 3 +-
.../version2/nginx-plus.virtualserver.tmpl | 4 +-
internal/configs/version2/templates_test.go | 353 +++++++++++++++++-
internal/configs/virtualserver.go | 6 +-
internal/configs/virtualserver_test.go | 20 +-
6 files changed, 369 insertions(+), 21 deletions(-)
diff --git a/internal/configs/version2/__snapshots__/templates_test.snap b/internal/configs/version2/__snapshots__/templates_test.snap
index 404cda1aba..fd6a021670 100644
--- a/internal/configs/version2/__snapshots__/templates_test.snap
+++ b/internal/configs/version2/__snapshots__/templates_test.snap
@@ -2948,7 +2948,7 @@ server {
allow all;
limit_req_log_level error;
limit_req_status 503;
- limit_req zone=pol_rl_test_test_test burst=5 delay=10;
+ limit_req zone=pol_rl_test_test_test_sync burst=5 delay=10;
auth_jwt "My Api";
auth_jwt_key_file jwk-secret;
app_protect_enable on;
@@ -3021,7 +3021,7 @@ server {
deny all;
deny 127.0.0.1;
allow all;
- limit_req zone=loc_pol_rl_test_test_test;
+ limit_req zone=loc_pol_rl_test_test_test_sync;
proxy_ssl_certificate egress-mtls-secret.pem;
diff --git a/internal/configs/version2/http.go b/internal/configs/version2/http.go
index 23d9cc93e0..818546b557 100644
--- a/internal/configs/version2/http.go
+++ b/internal/configs/version2/http.go
@@ -408,11 +408,10 @@ type LimitReq struct {
Burst int
NoDelay bool
Delay int
- Sync bool
}
func (rl LimitReq) String() string {
- return fmt.Sprintf("{ZoneName %q, Burst %q, NoDelay %v, Delay %q, Sync %v}", rl.ZoneName, rl.Burst, rl.NoDelay, rl.Delay, rl.Sync)
+ return fmt.Sprintf("{ZoneName %q, Burst %q, NoDelay %v, Delay %q}", rl.ZoneName, rl.Burst, rl.NoDelay, rl.Delay)
}
// LimitReqOptions defines rate limit options.
diff --git a/internal/configs/version2/nginx-plus.virtualserver.tmpl b/internal/configs/version2/nginx-plus.virtualserver.tmpl
index ed368a8ff1..ee40851713 100644
--- a/internal/configs/version2/nginx-plus.virtualserver.tmpl
+++ b/internal/configs/version2/nginx-plus.virtualserver.tmpl
@@ -67,7 +67,7 @@ map {{ $m.Source }} {{ $m.Variable }} {
{{- end }}
{{- range $z := .LimitReqZones }}
-limit_req_zone {{ $z.Key }} {{- if $z.Sync }} zone={{ $z.ZoneName }}_sync:{{ $z.ZoneSize }}{{ else }} zone={{ $z.ZoneName }}:{{ $z.ZoneSize }}{{- end }} rate={{ $z.Rate }}{{- if $z.Sync }} sync{{- end }};
+limit_req_zone {{ $z.Key }} zone={{ $z.ZoneName }}:{{ $z.ZoneSize }} rate={{ $z.Rate }}{{- if $z.Sync }} sync{{- end }};
{{- end }}
{{- range $m := .StatusMatches }}
@@ -202,7 +202,7 @@ server {
{{- end }}
{{- range $rl := $s.LimitReqs }}
- limit_req {{- if $rl.Sync }} zone={{ $rl.ZoneName }}_sync{{ else }} zone={{ $rl.ZoneName }}{{ end }}{{ if $rl.Burst }} burst={{ $rl.Burst }}{{ end }}
+ limit_req zone={{ $rl.ZoneName }}{{ if $rl.Burst }} burst={{ $rl.Burst }}{{ end }}
{{- if $rl.Delay }} delay={{ $rl.Delay }}{{ end }}{{ if $rl.NoDelay }} nodelay{{ end }};
{{- end }}
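A minimal, self-contained sketch (simplified types, not the repo's template executor) of what the rewritten zone line renders once the "_sync" suffix is baked into ZoneName; only the trailing " sync" parameter stays conditional:

package main

import (
	"os"
	"text/template"
)

type zone struct {
	Key, ZoneName, ZoneSize, Rate string
	Sync                          bool
}

func main() {
	// The simplified template line: no more branching on Sync for the name.
	const line = "limit_req_zone {{ .Key }} zone={{ .ZoneName }}:{{ .ZoneSize }} rate={{ .Rate }}{{ if .Sync }} sync{{ end }};\n"
	t := template.Must(template.New("rl").Parse(line))
	// Prints: limit_req_zone $url zone=pol_rl_test_test_test_sync:10m rate=10r/s sync;
	_ = t.Execute(os.Stdout, zone{
		Key: "$url", ZoneName: "pol_rl_test_test_test_sync",
		ZoneSize: "10m", Rate: "10r/s", Sync: true,
	})
}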
diff --git a/internal/configs/version2/templates_test.go b/internal/configs/version2/templates_test.go
index 8a8afecf61..0af9011a5d 100644
--- a/internal/configs/version2/templates_test.go
+++ b/internal/configs/version2/templates_test.go
@@ -43,7 +43,7 @@ func newTmplExecutorNGINX(t *testing.T) *TemplateExecutor {
func TestVirtualServerForNginxPlus(t *testing.T) {
t.Parallel()
executor := newTmplExecutorNGINXPlus(t)
- data, err := executor.ExecuteVirtualServerTemplate(&virtualServerCfg)
+ data, err := executor.ExecuteVirtualServerTemplate(&virtualServerCfgPlus)
if err != nil {
t.Errorf("Failed to execute template: %v", err)
}
@@ -1147,7 +1147,7 @@ var (
virtualServerCfg = VirtualServerConfig{
LimitReqZones: []LimitReqZone{
{
- ZoneName: "pol_rl_test_test_test", Rate: "10r/s", ZoneSize: "10m", Key: "$url", Sync: true,
+ ZoneName: "pol_rl_test_test_test", Rate: "10r/s", ZoneSize: "10m", Key: "$url",
},
},
Upstreams: []Upstream{
@@ -1493,6 +1493,355 @@ var (
},
}
+ virtualServerCfgPlus = VirtualServerConfig{
+ LimitReqZones: []LimitReqZone{
+ {
+ ZoneName: "pol_rl_test_test_test_sync", Rate: "10r/s", ZoneSize: "10m", Key: "$url", Sync: true,
+ },
+ },
+ Upstreams: []Upstream{
+ {
+ Name: "test-upstream",
+ Servers: []UpstreamServer{
+ {
+ Address: "10.0.0.20:8001",
+ },
+ },
+ LBMethod: "random",
+ Keepalive: 32,
+ MaxFails: 4,
+ FailTimeout: "10s",
+ MaxConns: 31,
+ SlowStart: "10s",
+ UpstreamZoneSize: "256k",
+ Queue: &Queue{Size: 10, Timeout: "60s"},
+ SessionCookie: &SessionCookie{Enable: true, Name: "test", Path: "/tea", Expires: "25s"},
+ NTLM: true,
+ },
+ {
+ Name: "coffee-v1",
+ Servers: []UpstreamServer{
+ {
+ Address: "10.0.0.31:8001",
+ },
+ },
+ MaxFails: 8,
+ FailTimeout: "15s",
+ MaxConns: 2,
+ UpstreamZoneSize: "256k",
+ },
+ {
+ Name: "coffee-v2",
+ Servers: []UpstreamServer{
+ {
+ Address: "10.0.0.32:8001",
+ },
+ },
+ MaxFails: 12,
+ FailTimeout: "20s",
+ MaxConns: 4,
+ UpstreamZoneSize: "256k",
+ },
+ },
+ SplitClients: []SplitClient{
+ {
+ Source: "$request_id",
+ Variable: "$split_0",
+ Distributions: []Distribution{
+ {
+ Weight: "50%",
+ Value: "@loc0",
+ },
+ {
+ Weight: "50%",
+ Value: "@loc1",
+ },
+ },
+ },
+ },
+ Maps: []Map{
+ {
+ Source: "$match_0_0",
+ Variable: "$match",
+ Parameters: []Parameter{
+ {
+ Value: "~^1",
+ Result: "@match_loc_0",
+ },
+ {
+ Value: "default",
+ Result: "@match_loc_default",
+ },
+ },
+ },
+ {
+ Source: "$http_x_version",
+ Variable: "$match_0_0",
+ Parameters: []Parameter{
+ {
+ Value: "v2",
+ Result: "1",
+ },
+ {
+ Value: "default",
+ Result: "0",
+ },
+ },
+ },
+ },
+ HTTPSnippets: []string{"# HTTP snippet"},
+ Server: Server{
+ ServerName: "example.com",
+ StatusZone: "example.com",
+ ProxyProtocol: true,
+ SSL: &SSL{
+ HTTP2: true,
+ Certificate: "cafe-secret.pem",
+ CertificateKey: "cafe-secret.pem",
+ },
+ TLSRedirect: &TLSRedirect{
+ BasedOn: "$scheme",
+ Code: 301,
+ },
+ ServerTokens: "off",
+ SetRealIPFrom: []string{"0.0.0.0/0"},
+ RealIPHeader: "X-Real-IP",
+ RealIPRecursive: true,
+ Allow: []string{"127.0.0.1"},
+ Deny: []string{"127.0.0.1"},
+ LimitReqs: []LimitReq{
+ {
+ ZoneName: "pol_rl_test_test_test_sync",
+ Delay: 10,
+ Burst: 5,
+ },
+ },
+ LimitReqOptions: LimitReqOptions{
+ LogLevel: "error",
+ RejectCode: 503,
+ },
+ JWTAuth: &JWTAuth{
+ Realm: "My Api",
+ Secret: "jwk-secret",
+ },
+ IngressMTLS: &IngressMTLS{
+ ClientCert: "ingress-mtls-secret",
+ VerifyClient: "on",
+ VerifyDepth: 2,
+ },
+ WAF: &WAF{
+ ApPolicy: "/etc/nginx/waf/nac-policies/default-dataguard-alarm",
+ ApSecurityLogEnable: true,
+ Enable: "on",
+ ApLogConf: []string{"/etc/nginx/waf/nac-logconfs/default-logconf"},
+ },
+ Snippets: []string{"# server snippet"},
+ InternalRedirectLocations: []InternalRedirectLocation{
+ {
+ Path: "/split",
+ Destination: "@split_0",
+ },
+ {
+ Path: "/coffee",
+ Destination: "@match",
+ },
+ },
+ HealthChecks: []HealthCheck{
+ {
+ Name: "coffee",
+ URI: "/",
+ Interval: "5s",
+ Jitter: "0s",
+ Fails: 1,
+ Passes: 1,
+ Port: 50,
+ ProxyPass: "http://coffee-v2",
+ Mandatory: true,
+ Persistent: true,
+ KeepaliveTime: "60s",
+ IsGRPC: false,
+ },
+ {
+ Name: "tea",
+ Interval: "5s",
+ Jitter: "0s",
+ Fails: 1,
+ Passes: 1,
+ Port: 50,
+ ProxyPass: "http://tea-v2",
+ GRPCPass: "grpc://tea-v3",
+ GRPCStatus: createPointerFromInt(12),
+ GRPCService: "tea-servicev2",
+ IsGRPC: true,
+ },
+ },
+ Locations: []Location{
+ {
+ Path: "/",
+ Snippets: []string{"# location snippet"},
+ Allow: []string{"127.0.0.1"},
+ Deny: []string{"127.0.0.1"},
+ LimitReqs: []LimitReq{
+ {
+ ZoneName: "loc_pol_rl_test_test_test_sync",
+ },
+ },
+ ProxyConnectTimeout: "30s",
+ ProxyReadTimeout: "31s",
+ ProxySendTimeout: "32s",
+ ClientMaxBodySize: "1m",
+ ProxyBuffering: true,
+ ProxyBuffers: "8 4k",
+ ProxyBufferSize: "4k",
+ ProxyMaxTempFileSize: "1024m",
+ ProxyPass: "http://test-upstream",
+ ProxyNextUpstream: "error timeout",
+ ProxyNextUpstreamTimeout: "5s",
+ Internal: true,
+ ProxyPassRequestHeaders: false,
+ ProxyPassHeaders: []string{"Host"},
+ ProxyPassRewrite: "$request_uri",
+ ProxyHideHeaders: []string{"Header"},
+ ProxyIgnoreHeaders: "Cache",
+ Rewrites: []string{"$request_uri $request_uri", "$request_uri $request_uri"},
+ AddHeaders: []AddHeader{
+ {
+ Header: Header{
+ Name: "Header-Name",
+ Value: "Header Value",
+ },
+ Always: true,
+ },
+ },
+ EgressMTLS: &EgressMTLS{
+ Certificate: "egress-mtls-secret.pem",
+ CertificateKey: "egress-mtls-secret.pem",
+ VerifyServer: true,
+ VerifyDepth: 1,
+ Ciphers: "DEFAULT",
+ Protocols: "TLSv1.3",
+ TrustedCert: "trusted-cert.pem",
+ SessionReuse: true,
+ ServerName: true,
+ },
+ },
+ {
+ Path: "@loc0",
+ ProxyConnectTimeout: "30s",
+ ProxyReadTimeout: "31s",
+ ProxySendTimeout: "32s",
+ ClientMaxBodySize: "1m",
+ ProxyPass: "http://coffee-v1",
+ ProxyNextUpstream: "error timeout",
+ ProxyNextUpstreamTimeout: "5s",
+ ProxyInterceptErrors: true,
+ ErrorPages: []ErrorPage{
+ {
+ Name: "@error_page_1",
+ Codes: "400 500",
+ ResponseCode: 200,
+ },
+ {
+ Name: "@error_page_2",
+ Codes: "500",
+ ResponseCode: 0,
+ },
+ },
+ },
+ {
+ Path: "@loc1",
+ ProxyConnectTimeout: "30s",
+ ProxyReadTimeout: "31s",
+ ProxySendTimeout: "32s",
+ ClientMaxBodySize: "1m",
+ ProxyPass: "http://coffee-v2",
+ ProxyNextUpstream: "error timeout",
+ ProxyNextUpstreamTimeout: "5s",
+ },
+ {
+ Path: "@loc2",
+ ProxyConnectTimeout: "30s",
+ ProxyReadTimeout: "31s",
+ ProxySendTimeout: "32s",
+ ClientMaxBodySize: "1m",
+ ProxyPass: "http://coffee-v2",
+ GRPCPass: "grpc://coffee-v3",
+ },
+ {
+ Path: "@match_loc_0",
+ ProxyConnectTimeout: "30s",
+ ProxyReadTimeout: "31s",
+ ProxySendTimeout: "32s",
+ ClientMaxBodySize: "1m",
+ ProxyPass: "http://coffee-v2",
+ ProxyNextUpstream: "error timeout",
+ ProxyNextUpstreamTimeout: "5s",
+ },
+ {
+ Path: "@match_loc_default",
+ ProxyConnectTimeout: "30s",
+ ProxyReadTimeout: "31s",
+ ProxySendTimeout: "32s",
+ ClientMaxBodySize: "1m",
+ ProxyPass: "http://coffee-v1",
+ ProxyNextUpstream: "error timeout",
+ ProxyNextUpstreamTimeout: "5s",
+ },
+ {
+ Path: "/return",
+ ProxyInterceptErrors: true,
+ ErrorPages: []ErrorPage{
+ {
+ Name: "@return_0",
+ Codes: "418",
+ ResponseCode: 200,
+ },
+ },
+ InternalProxyPass: "http://unix:/var/lib/nginx/nginx-418-server.sock",
+ },
+ },
+ ErrorPageLocations: []ErrorPageLocation{
+ {
+ Name: "@vs_cafe_cafe_vsr_tea_tea_tea__tea_error_page_0",
+ DefaultType: "application/json",
+ Return: &Return{
+ Code: 200,
+ Text: "Hello World",
+ },
+ Headers: nil,
+ },
+ {
+ Name: "@vs_cafe_cafe_vsr_tea_tea_tea__tea_error_page_1",
+ DefaultType: "",
+ Return: &Return{
+ Code: 200,
+ Text: "Hello World",
+ },
+ Headers: []Header{
+ {
+ Name: "Set-Cookie",
+ Value: "cookie1=test",
+ },
+ {
+ Name: "Set-Cookie",
+ Value: "cookie2=test; Secure",
+ },
+ },
+ },
+ },
+ ReturnLocations: []ReturnLocation{
+ {
+ Name: "@return_0",
+ DefaultType: "text/html",
+ Return: Return{
+ Code: 200,
+ Text: "Hello!",
+ },
+ },
+ },
+ },
+ }
+
virtualServerCfgWithHTTP2On = VirtualServerConfig{
Server: Server{
ServerName: "example.com",
diff --git a/internal/configs/virtualserver.go b/internal/configs/virtualserver.go
index a79bf05ad8..1c4e11d5d6 100644
--- a/internal/configs/virtualserver.go
+++ b/internal/configs/virtualserver.go
@@ -1054,6 +1054,9 @@ func (p *policiesCfg) addRateLimitConfig(
l := nl.LoggerFromContext(p.Context)
rlZoneName := rfc1123ToSnake(fmt.Sprintf("pol_rl_%v_%v_%v_%v", policy.Namespace, policy.Name, ownerDetails.vsNamespace, ownerDetails.vsName))
+ if zoneSync {
+ rlZoneName = fmt.Sprintf("%v_sync", rlZoneName)
+ }
if rateLimit.Condition != nil && rateLimit.Condition.JWT.Claim != "" && rateLimit.Condition.JWT.Match != "" {
lrz, warningText := generateGroupedLimitReqZone(rlZoneName, policy, podReplicas, ownerDetails, zoneSync)
if warningText != "" {
@@ -1071,9 +1074,6 @@ func (p *policiesCfg) addRateLimitConfig(
}
p.RateLimit.Reqs = append(p.RateLimit.Reqs, generateLimitReq(rlZoneName, rateLimit))
- for i := range p.RateLimit.Zones {
- p.RateLimit.Reqs[i].Sync = p.RateLimit.Zones[i].Sync
- }
if len(p.RateLimit.Reqs) == 1 {
p.RateLimit.Options = generateLimitReqOptions(rateLimit)
} else {
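The naming rule this hunk centralizes, as a hedged sketch: the "_sync" suffix is decided once, at name-construction time, so the templates and LimitReq entries no longer need a per-request Sync flag. snakeCase here is a hypothetical stand-in for the controller's rfc1123ToSnake helper.

package main

import (
	"fmt"
	"strings"
)

func snakeCase(s string) string { return strings.ReplaceAll(s, "-", "_") }

// rateLimitZoneName sketches the derivation after this patch.
func rateLimitZoneName(polNS, polName, vsNS, vsName string, zoneSync bool) string {
	name := snakeCase(fmt.Sprintf("pol_rl_%v_%v_%v_%v", polNS, polName, vsNS, vsName))
	if zoneSync {
		name = fmt.Sprintf("%v_sync", name)
	}
	return name
}

func main() {
	// Prints: pol_rl_default_rate_limit_policy_default_cafe_sync
	fmt.Println(rateLimitZoneName("default", "rate-limit-policy", "default", "cafe", true))
}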
diff --git a/internal/configs/virtualserver_test.go b/internal/configs/virtualserver_test.go
index ad1353fed3..cd5170afdd 100644
--- a/internal/configs/virtualserver_test.go
+++ b/internal/configs/virtualserver_test.go
@@ -6511,7 +6511,7 @@ func TestGenerateVirtualServerConfigRateLimit(t *testing.T) {
LimitReqZones: []version2.LimitReqZone{
{
Key: "$binary_remote_addr",
- ZoneName: "pol_rl_default_rate_limit_policy_default_cafe",
+ ZoneName: "pol_rl_default_rate_limit_policy_default_cafe_sync",
ZoneSize: "10M",
Rate: "10r/s",
Sync: true,
@@ -6524,7 +6524,7 @@ func TestGenerateVirtualServerConfigRateLimit(t *testing.T) {
VSNamespace: "default",
VSName: "cafe",
LimitReqs: []version2.LimitReq{
- {ZoneName: "pol_rl_default_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0, Sync: true},
+ {ZoneName: "pol_rl_default_rate_limit_policy_default_cafe_sync", Burst: 0, NoDelay: false, Delay: 0},
},
LimitReqOptions: version2.LimitReqOptions{
DryRun: false,
@@ -6853,7 +6853,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
Maps: []version2.Map{
{
Source: "$rl_default_cafe_group_user_type_tier",
- Variable: "$pol_rl_default_basic_rate_limit_policy_default_cafe",
+ Variable: "$pol_rl_default_basic_rate_limit_policy_default_cafe_sync",
Parameters: []version2.Parameter{
{
Value: "default",
@@ -6867,7 +6867,7 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
},
{
Source: "$rl_default_cafe_group_user_type_tier",
- Variable: "$pol_rl_default_premium_rate_limit_policy_default_cafe",
+ Variable: "$pol_rl_default_premium_rate_limit_policy_default_cafe_sync",
Parameters: []version2.Parameter{
{
Value: "default",
@@ -6928,8 +6928,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
HTTPSnippets: []string{},
LimitReqZones: []version2.LimitReqZone{
{
- Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe",
- ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe",
+ Key: "$pol_rl_default_premium_rate_limit_policy_default_cafe_sync",
+ ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe_sync",
ZoneSize: "10M",
Rate: "10r/s",
PolicyResult: "$jwt_claim_sub",
@@ -6940,8 +6940,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
Sync: true,
},
{
- Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe",
- ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe",
+ Key: "$pol_rl_default_basic_rate_limit_policy_default_cafe_sync",
+ ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe_sync",
ZoneSize: "20M",
Rate: "20r/s",
PolicyResult: "$jwt_claim_sub",
@@ -6959,8 +6959,8 @@ func TestGenerateVirtualServerConfigRateLimitGroups(t *testing.T) {
VSNamespace: "default",
VSName: "cafe",
LimitReqs: []version2.LimitReq{
- {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0, Sync: true},
- {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe", Burst: 0, NoDelay: false, Delay: 0, Sync: true},
+ {ZoneName: "pol_rl_default_premium_rate_limit_policy_default_cafe_sync", Burst: 0, NoDelay: false, Delay: 0},
+ {ZoneName: "pol_rl_default_basic_rate_limit_policy_default_cafe_sync", Burst: 0, NoDelay: false, Delay: 0},
},
LimitReqOptions: version2.LimitReqOptions{
DryRun: false,
From af45a63df38d85b9395aee6370b4683ad07f586e Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Mon, 17 Mar 2025 10:45:48 +0000
Subject: [PATCH 07/11] update ingress tests
---
internal/configs/ingress.go | 4 ++-
internal/configs/ingress_test.go | 2 +-
.../version1/__snapshots__/template_test.snap | 27 +++++++++++++++++++
internal/configs/version1/config.go | 1 -
.../configs/version1/nginx-plus.ingress.tmpl | 4 +--
internal/configs/version1/template_test.go | 14 +++++++++-
6 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/internal/configs/ingress.go b/internal/configs/ingress.go
index b9d388a761..123bf02afa 100644
--- a/internal/configs/ingress.go
+++ b/internal/configs/ingress.go
@@ -272,6 +272,9 @@ func generateNginxCfg(p NginxCfgParams) (version1.IngressNginxConfig, Warnings)
if cfgParams.LimitReqRate != "" {
zoneName := p.ingEx.Ingress.Namespace + "/" + p.ingEx.Ingress.Name
+ if p.ingEx.ZoneSync {
+ zoneName = fmt.Sprintf("%v_sync", zoneName)
+ }
loc.LimitReq = &version1.LimitReq{
Zone: zoneName,
Burst: cfgParams.LimitReqBurst,
@@ -280,7 +283,6 @@ func generateNginxCfg(p NginxCfgParams) (version1.IngressNginxConfig, Warnings)
DryRun: cfgParams.LimitReqDryRun,
LogLevel: cfgParams.LimitReqLogLevel,
RejectCode: cfgParams.LimitReqRejectCode,
- Sync: cfgParams.ZoneSync.Enable,
}
if !limitReqZoneExists(limitReqZones, zoneName) {
rate := cfgParams.LimitReqRate
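The same idea on the Ingress path, sketched below (function name and signature are illustrative): fold the suffix into the zone name up front rather than branching in the template.

package main

import "fmt"

// ingressZoneName mirrors the hunk above: namespace/name, plus "_sync"
// when zone sync is enabled for the Ingress.
func ingressZoneName(namespace, name string, zoneSync bool) string {
	zoneName := namespace + "/" + name
	if zoneSync {
		zoneName = fmt.Sprintf("%v_sync", zoneName)
	}
	return zoneName
}

func main() {
	// Prints: default/cafe-ingress_sync
	fmt.Println(ingressZoneName("default", "cafe-ingress", true))
}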
diff --git a/internal/configs/ingress_test.go b/internal/configs/ingress_test.go
index 1f638e67ae..e89fb2ad7b 100644
--- a/internal/configs/ingress_test.go
+++ b/internal/configs/ingress_test.go
@@ -1173,7 +1173,7 @@ func TestGenerateNginxCfgForLimitReqZoneSync(t *testing.T) {
expectedZones := []version1.LimitReqZone{
{
- Name: "default/cafe-ingress",
+ Name: "default/cafe-ingress_sync",
Key: "${request_uri}",
Size: "11m",
Rate: "200r/s",
diff --git a/internal/configs/version1/__snapshots__/template_test.snap b/internal/configs/version1/__snapshots__/template_test.snap
index 6924d6b8ef..153829110d 100644
--- a/internal/configs/version1/__snapshots__/template_test.snap
+++ b/internal/configs/version1/__snapshots__/template_test.snap
@@ -1235,6 +1235,33 @@ server {
+ location / {
+ set $service "";
+ status_zone "";
+ proxy_http_version 1.1;
+
+ proxy_connect_timeout ;
+ proxy_read_timeout ;
+ proxy_send_timeout ;
+ client_max_body_size ;
+
+ proxy_set_header Host $host;
+ proxy_set_header X-Real-IP $remote_addr;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header X-Forwarded-Host $host;
+ proxy_set_header X-Forwarded-Port $server_port;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_buffering off;
+ proxy_pass http://test;
+
+
+ limit_req zone=default/myingress_sync burst=100;
+
+
+ limit_req_status 429;
+
+ }
+
}
---
diff --git a/internal/configs/version1/config.go b/internal/configs/version1/config.go
index ba600cdd9b..5ff1ee1d3f 100644
--- a/internal/configs/version1/config.go
+++ b/internal/configs/version1/config.go
@@ -161,7 +161,6 @@ type LimitReq struct {
RejectCode int
DryRun bool
LogLevel string
- Sync bool
}
// Location describes an NGINX location.
diff --git a/internal/configs/version1/nginx-plus.ingress.tmpl b/internal/configs/version1/nginx-plus.ingress.tmpl
index 7b6b576ced..94f30de87f 100644
--- a/internal/configs/version1/nginx-plus.ingress.tmpl
+++ b/internal/configs/version1/nginx-plus.ingress.tmpl
@@ -22,7 +22,7 @@ upstream {{$upstream.Name}} {
{{- end}}
{{range $limitReqZone := .LimitReqZones}}
-limit_req_zone {{ $limitReqZone.Key }}{{- if $limitReqZone.Sync }} zone={{ $limitReqZone.Name }}_sync:{{$limitReqZone.Size}}{{ else }} zone={{ $limitReqZone.Name }}:{{$limitReqZone.Size}} {{- end }} rate={{$limitReqZone.Rate}}{{- if $limitReqZone.Sync }} sync{{- end }};
+limit_req_zone {{ $limitReqZone.Key }} zone={{ $limitReqZone.Name }}:{{$limitReqZone.Size}} rate={{$limitReqZone.Rate}}{{- if $limitReqZone.Sync }} sync{{- end }};
{{end}}
{{range $server := .Servers}}
@@ -321,7 +321,7 @@ server {
{{- end}}
{{with $location.LimitReq}}
- limit_req zone={{- if $location.LimitReq.Sync }}{{ $location.LimitReq.Zone }}_sync {{ else }}{{ $location.LimitReq.Zone }}{{- end }} {{if $location.LimitReq.Burst}}burst={{$location.LimitReq.Burst}}{{end}} {{if $location.LimitReq.NoDelay}}nodelay{{else if $location.LimitReq.Delay}}delay={{$location.LimitReq.Delay}}{{end}};
+ limit_req zone={{ $location.LimitReq.Zone }}{{- if $location.LimitReq.Burst }} burst={{$location.LimitReq.Burst}}{{- end }}{{- if $location.LimitReq.NoDelay }} nodelay{{- else if $location.LimitReq.Delay }} delay={{$location.LimitReq.Delay}}{{- end }};
{{if $location.LimitReq.DryRun}}limit_req_dry_run on;{{end}}
{{if $location.LimitReq.LogLevel}}limit_req_log_level {{$location.LimitReq.LogLevel}};{{end}}
{{if $location.LimitReq.RejectCode}}limit_req_status {{$location.LimitReq.RejectCode}};{{end}}
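For the location-level directive, a runnable sketch (simplified struct, not the repo's executor) of the rewritten line; the zone comes straight from the already-suffixed Zone field, and burst/nodelay/delay are emitted with single, predictable spacing:

package main

import (
	"os"
	"text/template"
)

type limitReq struct {
	Zone    string
	Burst   int
	Delay   int
	NoDelay bool
}

func main() {
	const line = "limit_req zone={{ .Zone }}{{ if .Burst }} burst={{ .Burst }}{{ end }}{{ if .NoDelay }} nodelay{{ else if .Delay }} delay={{ .Delay }}{{ end }};\n"
	t := template.Must(template.New("lr").Parse(line))
	// Prints: limit_req zone=default/myingress_sync burst=100;
	_ = t.Execute(os.Stdout, limitReq{Zone: "default/myingress_sync", Burst: 100})
}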
diff --git a/internal/configs/version1/template_test.go b/internal/configs/version1/template_test.go
index 901cbe8f40..c4622a7264 100644
--- a/internal/configs/version1/template_test.go
+++ b/internal/configs/version1/template_test.go
@@ -1829,11 +1829,22 @@ func TestExecuteTemplate_ForIngressForNGINXPlusWithRequestRateLimitZoneSync(t *t
Servers: []Server{
{
Name: "test.example.com",
+ Locations: []Location{
+ {
+ Path: "/",
+ Upstream: testUpstream,
+ LimitReq: &LimitReq{
+ Zone: "default/myingress_sync",
+ Burst: 100,
+ RejectCode: 429,
+ },
+ },
+ },
},
},
LimitReqZones: []LimitReqZone{
{
- Name: "default/zone1",
+ Name: "default/zone1_sync",
Key: "${binary_remote_addr}",
Size: "10m",
Rate: "200r/s",
@@ -1851,6 +1862,7 @@ func TestExecuteTemplate_ForIngressForNGINXPlusWithRequestRateLimitZoneSync(t *t
wantDirectives := []string{
"limit_req_zone ${binary_remote_addr} zone=default/zone1_sync:10m rate=200r/s sync;",
+ "limit_req zone=default/myingress_sync burst=100;",
}
for _, want := range wantDirectives {
From 5d6e6f1f0f2d10123d5daa48f25235d756dff016 Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Mon, 17 Mar 2025 11:17:38 +0000
Subject: [PATCH 08/11] set correct ingress name for minion test
---
tests/suite/test_rl_ingress.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/tests/suite/test_rl_ingress.py b/tests/suite/test_rl_ingress.py
index 265ca7910e..c1ef67fb74 100644
--- a/tests/suite/test_rl_ingress.py
+++ b/tests/suite/test_rl_ingress.py
@@ -4,6 +4,7 @@
import requests
from settings import TEST_DATA
from suite.fixtures.fixtures import PublicEndpoint
+from suite.test_annotations import get_minions_info_from_yaml
from suite.utils.nginx_api_utils import (
check_synced_zone_exists,
wait_for_zone_sync_enabled,
@@ -278,8 +279,9 @@ def test_ingress_rate_limit_scaled_with_zone_sync(
ingress_controller_prerequisites.namespace,
)
ingress_name = annotations_setup.ingress_name
- # if annotations_setup.get("upstream_names") is not None:
- # print(f"upstream names: {annotations_setup.upstream_names}")
+ if "mergeable-scaled" in annotations_setup.ingress_src_file:
+ minions_info = get_minions_info_from_yaml(annotations_setup.ingress_src_file)
+ ingress_name = minions_info[0].get("name")
ingress = read_ingress(kube_apis.networking_v1, ingress_name, annotations_setup.namespace)
key = ingress.metadata.annotations.get("nginx.org/limit-req-key")
rate = ingress.metadata.annotations.get("nginx.org/limit-req-rate")
From c60b9537f0695a3f3a65d01343b7b54861a90f9a Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Mon, 17 Mar 2025 11:29:39 +0000
Subject: [PATCH 09/11] clean up test markers
---
pyproject.toml | 6 ++++--
tests/suite/test_rl_policies_vsr.py | 1 -
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 67110769ca..be6af3c374 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -92,6 +92,8 @@ markers = [
"vsr_upstream",
"watch_namespace",
"wildcard_tls",
- "alex",
]
-testpaths = ["tests", "perf-tests"]
+testpaths = [
+ "tests",
+ "perf-tests",
+]
diff --git a/tests/suite/test_rl_policies_vsr.py b/tests/suite/test_rl_policies_vsr.py
index 1eed783b39..9dafd58536 100644
--- a/tests/suite/test_rl_policies_vsr.py
+++ b/tests/suite/test_rl_policies_vsr.py
@@ -511,7 +511,6 @@ def test_rl_policy_5rs_with_zone_sync_vsr(
delete_policy(kube_apis.custom_objects, pol_name, v_s_route_setup.route_m.namespace)
@pytest.mark.skip_for_nginx_oss
- @pytest.mark.alex # temp
@pytest.mark.parametrize("src", [rl_vsr_pri_sca_src])
def test_rl_policy_with_scale_and_zone_sync_vsr(
self,
From 6efd1af963d4ae15922828ff8e65cc8180e95422 Mon Sep 17 00:00:00 2001
From: Paul Abel
Date: Mon, 17 Mar 2025 12:06:11 +0000
Subject: [PATCH 10/11] skip Plus-only zone sync test in OSS
---
tests/suite/test_rl_ingress.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/tests/suite/test_rl_ingress.py b/tests/suite/test_rl_ingress.py
index c1ef67fb74..79d733185f 100644
--- a/tests/suite/test_rl_ingress.py
+++ b/tests/suite/test_rl_ingress.py
@@ -243,6 +243,7 @@ def test_ingress_rate_limit_scaled(
scale_deployment(kube_apis.v1, kube_apis.apps_v1_api, "nginx-ingress", ns, 1)
+@pytest.mark.skip_for_nginx_oss
@pytest.mark.annotations
@pytest.mark.parametrize("annotations_setup", ["standard-scaled", "mergeable-scaled"], indirect=True)
class TestRateLimitIngressScaledWithZoneSync:
From 26206bfbfee9aa04fdcb0306a2f14c525447d971 Mon Sep 17 00:00:00 2001
From: Alex Fenlon
Date: Tue, 18 Mar 2025 16:33:03 +0000
Subject: [PATCH 11/11] remove stale failure comment now that the test passes
---
tests/suite/test_rl_policies_vsr.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/suite/test_rl_policies_vsr.py b/tests/suite/test_rl_policies_vsr.py
index 9dafd58536..d10a04cc80 100644
--- a/tests/suite/test_rl_policies_vsr.py
+++ b/tests/suite/test_rl_policies_vsr.py
@@ -541,7 +541,7 @@ def test_rl_policy_with_scale_and_zone_sync_vsr(
)
print("Step 2: apply the policy to the virtual server route")
- apply_and_assert_valid_vsr( # failing as it cannot find state?
+ apply_and_assert_valid_vsr(
kube_apis,
v_s_route_setup.route_m.namespace,
v_s_route_setup.route_m.name,