diff --git a/Makefile b/Makefile
index 0aea955a405..172418ae58b 100644
--- a/Makefile
+++ b/Makefile
@@ -322,6 +322,7 @@ integration: ## Run integration tests against a real k8s cluster
./_integration/testsuite/install-service-apis.sh
./_integration/testsuite/install-contour-working.sh
./_integration/testsuite/install-fallback-certificate.sh
+ ./_integration/testsuite/install-ratelimit-service.sh
./_integration/testsuite/run-test-case.sh ./_integration/testsuite/httpproxy/*.yaml
./_integration/testsuite/cleanup.sh
diff --git a/_integration/testsuite/httpproxy/019-local-rate-limiting.yaml b/_integration/testsuite/httpproxy/019-local-rate-limiting.yaml
index 62cb5519f4e..a05cb541817 100644
--- a/_integration/testsuite/httpproxy/019-local-rate-limiting.yaml
+++ b/_integration/testsuite/httpproxy/019-local-rate-limiting.yaml
@@ -37,6 +37,21 @@ $apply:
---
+# Wait for the service to have endpoints before trying to make
+# a request.
+
+import data.contour.resources
+
+error_endpoints_not_ready[msg] {
+ ep := resources.get("endpoints", "echo")
+
+ not ep.subsets[0].addresses
+
+ msg := "endpoints for svc/ingress-conformance-echo are not ready"
+}
+
+---
+
# This proxy has a local rate limit on the virtual host.
apiVersion: projectcontour.io/v1
kind: HTTPProxy
@@ -55,21 +70,6 @@ spec:
port: 80
---
-# Wait for the service to have endpoints before trying to make
-# a request.
-
-import data.contour.resources
-
-error_endpoints_not_ready[msg] {
- ep := resources.get("endpoints", "echo")
-
- not ep.subsets[0].addresses
-
- msg := "endpoints for svc/ingress-conformance-echo are not ready"
-}
-
----
-
# Make a request against the proxy, confirm a 200 response
# is returned.
diff --git a/_integration/testsuite/httpproxy/020-global-rate-limiting.yaml b/_integration/testsuite/httpproxy/020-global-rate-limiting.yaml
new file mode 100644
index 00000000000..55dc0a41aa9
--- /dev/null
+++ b/_integration/testsuite/httpproxy/020-global-rate-limiting.yaml
@@ -0,0 +1,231 @@
+# Copyright Project Contour Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# This check depends on the `--watch=endpoints` argument being given
+# to integration-tester.
+
+---
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: ingress-conformance-echo
+$apply:
+ fixture:
+ as: echo
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: ingress-conformance-echo
+$apply:
+ fixture:
+ as: echo
+
+---
+
+# Create the HTTPProxy without rate limits first
+# and wait until we get a 200 from it before applying
+# rate limits and counting responses. This ensures
+# the pods are up and receiving traffic and prevents
+# the test from being flaky.
+
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: vhostratelimit
+spec:
+ virtualhost:
+ fqdn: vhostratelimit.projectcontour.io
+ routes:
+ - services:
+ - name: echo
+ port: 80
+---
+
+# Wait until we get a 200 from the proxy confirming
+# the pods are up and serving traffic.
+
+import data.contour.http.client
+import data.contour.http.client.url
+import data.contour.http.expect
+
+Response := client.Get({
+ "url": url.http("/"),
+ "headers": {
+ "Host": "vhostratelimit.projectcontour.io",
+ "User-Agent": client.ua("global-rate-limit"),
+ },
+})
+
+check_for_status_code [msg] {
+ msg := expect.response_status_is(Response, 200)
+}
+
+---
+
+
+# Add a global rate limit policy on the virtual host.
+
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: vhostratelimit
+spec:
+ virtualhost:
+ fqdn: vhostratelimit.projectcontour.io
+ rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - genericKey:
+ value: vhostlimit
+ routes:
+ - services:
+ - name: echo
+ port: 80
+---
+
+# Make a request against the proxy, confirm a 200 response
+# is returned since we're allowed one request per hour.
+
+import data.contour.http.client
+import data.contour.http.client.url
+import data.contour.http.expect
+
+Response := client.Get({
+ "url": url.http("/"),
+ "headers": {
+ "Host": "vhostratelimit.projectcontour.io",
+ "User-Agent": client.ua("global-rate-limit"),
+ },
+})
+
+check_for_status_code [msg] {
+ msg := expect.response_status_is(Response, 200)
+}
+
+---
+
+# Make another request against the proxy, confirm a 429
+# response is now returned since we've exceeded the rate
+# limit.
+
+import data.contour.http.client
+import data.contour.http.client.url
+import data.contour.http.expect
+
+Response := client.Get({
+ "url": url.http("/"),
+ "headers": {
+ "Host": "vhostratelimit.projectcontour.io",
+ "User-Agent": client.ua("global-rate-limit"),
+ },
+})
+
+check_for_status_code [msg] {
+ msg := expect.response_status_is(Response, 429)
+}
+
+---
+
+# This proxy has a global rate limit on a route.
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ name: routeratelimit
+spec:
+ virtualhost:
+ fqdn: routeratelimit.projectcontour.io
+ routes:
+ - services:
+ - name: echo
+ port: 80
+ rateLimitPolicy:
+ global:
+ descriptors:
+ - entries:
+ - genericKey:
+ value: routelimit
+ - conditions:
+ - prefix: /unlimited
+ services:
+ - name: echo
+ port: 80
+---
+
+# Make a request against the proxy, confirm a 200 response
+# is returned since we're allowed one request per hour.
+
+import data.contour.http.client
+import data.contour.http.client.url
+import data.contour.http.expect
+
+Response := client.Get({
+ "url": url.http("/"),
+ "headers": {
+ "Host": "routeratelimit.projectcontour.io",
+ "User-Agent": client.ua("global-rate-limit"),
+ },
+})
+
+check_for_status_code [msg] {
+ msg := expect.response_status_is(Response, 200)
+}
+
+---
+
+# Make another request against the proxy, confirm a 429
+# response is now returned since we've exceeded the rate
+# limit.
+
+import data.contour.http.client
+import data.contour.http.client.url
+import data.contour.http.expect
+
+Response := client.Get({
+ "url": url.http("/"),
+ "headers": {
+ "Host": "routeratelimit.projectcontour.io",
+ "User-Agent": client.ua("global-rate-limit"),
+ },
+})
+
+check_for_status_code [msg] {
+ msg := expect.response_status_is(Response, 429)
+}
+
+---
+
+# Make a request against the route that doesn't have
+# rate limiting to confirm we still get a 200 for that
+# route.
+
+import data.contour.http.client
+import data.contour.http.client.url
+import data.contour.http.expect
+
+Response := client.Get({
+ "url": url.http("/unlimited"),
+ "headers": {
+ "Host": "routeratelimit.projectcontour.io",
+ "User-Agent": client.ua("global-rate-limit"),
+ },
+})
+
+check_for_status_code [msg] {
+ msg := expect.response_status_is(Response, 200)
+}
diff --git a/_integration/testsuite/install-ratelimit-service.sh b/_integration/testsuite/install-ratelimit-service.sh
new file mode 100755
index 00000000000..a14a43e2641
--- /dev/null
+++ b/_integration/testsuite/install-ratelimit-service.sh
@@ -0,0 +1,90 @@
+#! /usr/bin/env bash
+
+# Copyright Project Contour Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# install-ratelimit-service.sh: Install a rate limit service and configuration
+# for Contour.
+
+set -o pipefail
+set -o errexit
+set -o nounset
+
+readonly KIND=${KIND:-kind}
+readonly KUBECTL=${KUBECTL:-kubectl}
+
+readonly WAITTIME=${WAITTIME:-5m}
+
+readonly HERE=$(cd $(dirname $0) && pwd)
+readonly REPO=$(cd ${HERE}/../.. && pwd)
+
+# Define some rate limiting policies to correspond to
+# testsuite/httpproxy/020-global-rate-limiting.yaml.
+${KUBECTL} apply -f - <
- This package holds the specification for the projectcontour.io Custom Resource Definitions (CRDs). Package v1 holds the specification for the projectcontour.io Custom Resource Definitions (CRDs). In building this CRD, we’ve inadvertently overloaded the word “Condition”, so we’ve tried to make
this spec clear as to which types of condition are which.
+(Appears on:
+RateLimitDescriptorEntry)
+
+ GenericKeyDescriptor defines a descriptor entry with a static key and
+value.projectcontour.io/v1
MatchConditions
are used by Routes
and Includes
to specify rules to match requests against for either
@@ -742,6 +742,87 @@ ExtensionServiceReferenc
+
GenericKeyDescriptor
+
+
Field | +Description | +
---|---|
+key
++ +string + + |
+
+(Optional)
+ Key defines the key of the descriptor entry. If not set, the +key is set to “generic_key”. + |
+
+value
++ +string + + |
+
+ Value defines the value of the descriptor entry. + |
+
+(Appears on: +RateLimitPolicy) +
++
GlobalRateLimitPolicy defines global rate limiting parameters.
+ +Field | +Description | +
---|---|
+descriptors
++ + +[]RateLimitDescriptor + + + |
+
+ Descriptors defines the list of descriptors that will +be generated and sent to the rate limit service. Each +descriptor contains 1+ key-value pair entries. + |
+
@@ -1536,6 +1617,107 @@
+(Appears on: +GlobalRateLimitPolicy) +
++
RateLimitDescriptor defines a list of key-value pair generators.
+ +Field | +Description | +
---|---|
+entries
++ + +[]RateLimitDescriptorEntry + + + |
+
+ Entries is the list of key-value pair generators. + |
+
+(Appears on: +RateLimitDescriptor) +
++
RateLimitDescriptorEntry is a key-value pair generator. Exactly +one field on this struct must be non-nil.
+ +Field | +Description | +
---|---|
+genericKey
++ + +GenericKeyDescriptor + + + |
+
+(Optional)
+ GenericKey defines a descriptor entry with a static key and value. + |
+
+requestHeader
++ + +RequestHeaderDescriptor + + + |
+
+(Optional)
+ RequestHeader defines a descriptor entry that’s populated only if +a given header is present on the request. The descriptor key is static, +and the descriptor value is equal to the value of the header. + |
+
+remoteAddress
++ + +RemoteAddressDescriptor + + + |
+
+(Optional)
+ RemoteAddress defines a descriptor entry with a key of “remote_address” +and a value equal to the client’s IP address (from x-forwarded-for). + |
+
@@ -1565,13 +1747,42 @@
Local defines local rate limiting parameters, i.e. parameters for rate limiting that occurs within each Envoy pod as requests are handled.
global
+Global defines global rate limiting parameters, i.e. parameters +defining descriptors that are sent to an external rate limit +service (RLS) for a rate limit decision on each request.
++(Appears on: +RateLimitDescriptorEntry) +
++
RemoteAddressDescriptor defines a descriptor entry with a key of +“remote_address” and a value equal to the client’s IP address +(from x-forwarded-for).
+@@ -1676,6 +1887,51 @@
+(Appears on: +RateLimitDescriptorEntry) +
++
RequestHeaderDescriptor defines a descriptor entry that’s populated only +if a given header is present on the request. The value of the descriptor +entry is equal to the value of the header (if present).
+ +Field | +Description | +
---|---|
+headerName
++ +string + + |
+
+ HeaderName defines the name of the header to look for on the request. + |
+
+descriptorKey
++ +string + + |
+
+ DescriptorKey defines the key to use on the descriptor entry. + |
+
string
alias)
diff --git a/site/docs/main/config/rate-limiting.md b/site/docs/main/config/rate-limiting.md
index dc325e200b7..0697965a7f0 100644
--- a/site/docs/main/config/rate-limiting.md
+++ b/site/docs/main/config/rate-limiting.md
@@ -1,17 +1,37 @@
# Rate Limiting
+- [Overview](#overview)
+- [Local Rate Limiting](#local-rate-limiting)
+- [Global Rate Limiting](#global-rate-limiting)
+
+## Overview
+
+Rate limiting is a means of protecting backend services against unwanted traffic.
+This can be useful for a variety of different scenarios:
+
+- Protecting against denial-of-service (DoS) attacks by malicious actors
+- Protecting against DoS incidents due to bugs in client applications/services
+- Enforcing usage quotas for different classes of clients, e.g. free vs. paid tiers
+- Controlling resource consumption/cost
+
+Envoy supports two forms of HTTP rate limiting: **local** and **global**.
+
+In local rate limiting, rate limits are enforced by each Envoy instance, without any communication with other Envoys or any external service.
+
+In global rate limiting, an external rate limit service (RLS) is queried by each Envoy via gRPC for rate limit decisions.
+
+Contour supports both forms of Envoy's rate limiting.
+
## Local Rate Limiting
The `HTTPProxy` API supports defining local rate limit policies that can be applied to either individual routes or entire virtual hosts.
Local rate limit policies define a maximum number of requests per unit of time that an Envoy should proxy to the upstream service.
Requests beyond the defined limit will receive a `429 (Too Many Requests)` response by default.
-Local rate limit policies program Envoy's [HTTP local rate limit filter](https://www.envoyproxy.io/docs/envoy/v1.17.0/configuration/http/http_filters/local_rate_limit_filter#config-http-filters-local-rate-limit).
+Local rate limit policies program Envoy's [HTTP local rate limit filter][1].
It's important to note that local rate limit policies apply *per Envoy pod*.
For example, a local rate limit policy of 100 requests per second for a given route will result in *each Envoy pod* allowing up to 100 requests per second for that route.
-By contrast, **global** rate limiting (which will be added in a future Contour release), uses a shared external rate limit service, allowing rate limits to apply across *all* Envoy pods.
-
### Defining a local rate limit
Local rate limit policies can be defined for either routes or virtual hosts. A local rate limit policy requires a `requests` and a `units` field, defining the *number of requests per unit of time* that are allowed. `Requests` must be a positive integer, and `units` can be `second`, `minute`, or `hour`. Optionally, a `burst` parameter can also be provided, defining the number of requests above the baseline rate that are allowed in a short period of time. This would allow occasional larger bursts of traffic not to be rate limited.
@@ -27,10 +47,10 @@ spec:
virtualhost:
fqdn: local.projectcontour.io
rateLimitPolicy:
- local:
- requests: 100
- unit: hour
- burst: 20
+ local:
+ requests: 100
+ unit: hour
+ burst: 20
routes:
- conditions:
- prefix: /s1
@@ -128,3 +148,118 @@ spec:
- name: x-contour-ratelimited
value: "true"
```
+
+## Global Rate Limiting
+
+The `HTTPProxy` API also supports defining global rate limit policies on routes and virtual hosts.
+
+In order to use global rate limiting, you must first select and deploy an external rate limit service (RLS).
+There is an [Envoy rate limit service implementation][2], but any service that implements the [RateLimitService gRPC interface][3] is supported.
+
+### Configuring an external RLS with Contour
+
+Once you have deployed your RLS, you must configure it with Contour.
+
+Define an extension service for it (substituting values as appropriate):
+```yaml
+apiVersion: projectcontour.io/v1alpha1
+kind: ExtensionService
+metadata:
+ namespace: projectcontour
+ name: ratelimit
+spec:
+ protocol: h2
+ services:
+ - name: ratelimit
+ port: 8081
+```
+
+Now add a reference to it in the Contour config file:
+```yaml
+rateLimitService:
+ extensionService: projectcontour/ratelimit
+ domain: contour
+ failOpen: true
+```
+
+### Defining a global rate limit policy
+
+Global rate limit policies can be defined for either routes or virtual hosts. Unlike local rate limit policies, global rate limit policies do not directly define a rate limit. Instead, they define a set of request descriptors that will be generated and sent to the external RLS for each request. The external RLS then makes the rate limit decision based on the descriptors and returns a response to Envoy.
+
+A global rate limit policy for the virtual host:
+```yaml
+apiVersion: projectcontour.io/v1
+kind: HTTPProxy
+metadata:
+ namespace: default
+ name: ratelimited-vhost
+spec:
+ virtualhost:
+ fqdn: local.projectcontour.io
+ rateLimitPolicy:
+ global:
+ descriptors:
+ # the first descriptor has a single key-value pair:
+ # [ remote_address=