Skip to content
This repository has been archived by the owner on Feb 9, 2022. It is now read-only.

Commit

Permalink
Increase nginx-ingress + oauth2_proxy to replicas=2
Browse files Browse the repository at this point in the history
nginx-ingress and oauth2_proxy are expected to be in the serving path,
and so should be resilient to single-machine failure (as per the BKPR
default HA target).

Accordingly:
- increase replicas for each to 2
- add (weak) host/zone/region anti-affinity
- configure a `PodDisruptionBudget` that ensures at least 1 replica of
  each is healthy before draining a node.

Fixes #373
  • Loading branch information
anguslees committed Mar 5, 2019
1 parent 8d27e53 commit 276f11b
Show file tree
Hide file tree
Showing 3 changed files with 48 additions and 2 deletions.
16 changes: 15 additions & 1 deletion manifests/components/nginx-ingress.jsonnet
Expand Up @@ -18,6 +18,7 @@
*/

local kube = import "../lib/kube.libsonnet";
local utils = import "../lib/utils.libsonnet";

local NGNIX_INGRESS_IMAGE = (import "images.json")["nginx-ingress-controller"];

Expand Down Expand Up @@ -141,11 +142,23 @@ local NGNIX_INGRESS_IMAGE = (import "images.json")["nginx-ingress-controller"];

hpa: kube.HorizontalPodAutoscaler($.p + "nginx-ingress-controller") + $.metadata {
target: $.controller,
spec+: {maxReplicas: 10},
spec+: {
// Put a cap on growth due to (eg) DoS attacks.
// Large sites will want to increase this to cover legitimate demand.
maxReplicas: 10,
},
},

pdb: kube.PodDisruptionBudget($.p + "nginx-ingress-controller") + $.metadata {
target_pod: $.controller.spec.template,
spec+: {minAvailable: $.controller.spec.replicas - 1},
},

controller: kube.Deployment($.p + "nginx-ingress-controller") + $.metadata {
local this = self,
spec+: {
// Ensure at least n+1. NB: HPA will increase replicas dynamically.
replicas: 2,
template+: {
metadata+: {
annotations+: {
Expand All @@ -158,6 +171,7 @@ local NGNIX_INGRESS_IMAGE = (import "images.json")["nginx-ingress-controller"];
serviceAccountName: $.serviceAccount.metadata.name,
//hostNetwork: true, // access real source IPs, IPv6, etc
terminationGracePeriodSeconds: 60,
affinity+: utils.weakNodeDiversity(this.spec.selector),
containers_+: {
default: kube.Container("nginx") {
image: NGNIX_INGRESS_IMAGE,
Expand Down
16 changes: 15 additions & 1 deletion manifests/components/oauth2-proxy.jsonnet
Expand Up @@ -18,6 +18,7 @@
*/

local kube = import "../lib/kube.libsonnet";
local utils = import "../lib/utils.libsonnet";

local OAUTH2_PROXY_IMAGE = (import "images.json")["oauth2_proxy"];

Expand All @@ -44,13 +45,26 @@ local OAUTH2_PROXY_IMAGE = (import "images.json")["oauth2_proxy"];

hpa: kube.HorizontalPodAutoscaler($.p + "oauth2-proxy") + $.metadata {
target: $.deploy,
spec+: {maxReplicas: 10},
spec+: {
// Put a cap on growth due to (eg) DoS attacks.
// Large sites will want to increase this to cover legitimate demand.
maxReplicas: 10,
},
},

pdb: kube.PodDisruptionBudget($.p + "oauth2-proxy") + $.metadata {
target_pod: $.deploy.spec.template,
spec+: {minAvailable: $.deploy.spec.replicas - 1},
},

deploy: kube.Deployment($.p + "oauth2-proxy") + $.metadata {
local this = self,
spec+: {
// Ensure at least n+1. NB: HPA will increase replicas dynamically.
replicas: 2,
template+: {
spec+: {
affinity+: utils.weakNodeDiversity(this.spec.selector),
containers_+: {
proxy: kube.Container("oauth2-proxy") {
image: OAUTH2_PROXY_IMAGE,
Expand Down
18 changes: 18 additions & 0 deletions manifests/lib/utils.libsonnet
Expand Up @@ -45,6 +45,24 @@ local kube = import "kube.libsonnet";
std.join(".", tail)
),

// weakNodeDiversity: soft pod anti-affinity that prefers to schedule
// pods matching `selector` onto distinct hosts, zones, and regions,
// so a single node (or zone) failure cannot take out every replica.
weakNodeDiversity(selector):: {
  // Topology domains to spread across, from narrowest to widest.
  local spread_keys = [
    "kubernetes.io/hostname",
    "failure-domain.beta.kubernetes.io/zone",
    "failure-domain.beta.kubernetes.io/region",
  ],
  podAntiAffinity+: {
    // "preferred" (weight < 100), not "required": scheduling must
    // still succeed on small clusters that cannot satisfy diversity.
    preferredDuringSchedulingIgnoredDuringExecution+: [
      {
        weight: 70,
        podAffinityTerm: {
          labelSelector: selector,
          topologyKey: key,
        },
      }
      for key in spread_keys
    ],
  },
},

TlsIngress(name):: kube.Ingress(name) {
local this = self,
metadata+: {
Expand Down

0 comments on commit 276f11b

Please sign in to comment.