Skip to content
Permalink
Browse files

Merge pull request #60 from bookingcom/isutton/admission-webhooks

Validating webhook
  • Loading branch information...
juliogreff committed May 3, 2019
2 parents 0bfbaa8 + 3c72f91 commit 04d2fe3d2476fd4b7dd6c5f7d883add6601c36d5
@@ -1,7 +1,10 @@
.testCoverage.txt
#shipper
e2e.test
.vscode/
pkg/chart/testdata/chartmuseum
pkg/chart/testdata/index.yaml
docs/generated

# Binaries generated when building Shipper
./shipper
./shipper-state-metrics
./e2e.test
@@ -1,5 +1,4 @@
ARG BASE_IMAGE=alpine:3.8
FROM ${BASE_IMAGE}
FROM alpine:3.8
LABEL authors="Parham Doustdar <parham.doustdar@booking.com>, Alexey Surikov <alexey.surikov@booking.com>, Igor Sutton <igor.sutton@booking.com>, Ben Tyler <benjamin.tyler@booking.com>"
RUN apk add ca-certificates
ADD shipper /bin/shipper
@@ -1,6 +1,5 @@
ARG BASE_IMAGE=alpine:3.8
FROM ${BASE_IMAGE}
FROM alpine:3.8
LABEL authors="Parham Doustdar <parham.doustdar@booking.com>, Alexey Surikov <alexey.surikov@booking.com>, Igor Sutton <igor.sutton@booking.com>, Ben Tyler <benjamin.tyler@booking.com>"
RUN apk add ca-certificates
ADD shipper-state-metrics /bin/shipper-state-metrics
ENTRYPOINT ["shipper-state-metrics", "-v", "2"]
ENTRYPOINT ["shipper-state-metrics", "-v", "2"]

Some generated files are not rendered by default. Learn more.

Oops, something went wrong.
@@ -0,0 +1,24 @@
@@ -22,7 +22,7 @@ echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
docker build -t bookingcom/shipper:$TRAVIS_COMMIT -f Dockerfile.shipper .
docker push bookingcom/shipper:$TRAVIS_COMMIT

docker build -t bookingcom/shipper-state-metrics:$TRAVIS_COMMIT -f Dockerfile.metrics .
docker build -t bookingcom/shipper-state-metrics:$TRAVIS_COMMIT -f Dockerfile.shipper-state-metrics .
docker push bookingcom/shipper-state-metrics:$TRAVIS_COMMIT

# building a tagged release
@@ -42,6 +42,7 @@ import (
"github.com/bookingcom/shipper/pkg/controller/traffic"
"github.com/bookingcom/shipper/pkg/metrics/instrumentedclient"
shippermetrics "github.com/bookingcom/shipper/pkg/metrics/prometheus"
"github.com/bookingcom/shipper/pkg/webhook"
)

var controllers = []string{
@@ -52,6 +53,7 @@ var controllers = []string{
"capacity",
"traffic",
"janitor",
"webhook",
}

const defaultRESTTimeout time.Duration = 10 * time.Second
@@ -70,6 +72,10 @@ var (
chartCacheDir = flag.String("cachedir", filepath.Join(os.TempDir(), "chart-cache"), "location for the local cache of downloaded charts")
resync = flag.Duration("resync", defaultResync, "Informer's cache re-sync in Go's duration format.")
restTimeout = flag.Duration("rest-timeout", defaultRESTTimeout, "Timeout value for management and target REST clients. Does not affect informer watches.")
webhookCertPath = flag.String("webhook-cert", "", "Path to the TLS certificate for the webhook controller.")
webhookKeyPath = flag.String("webhook-key", "", "Path to the TLS private key for the webhook controller.")
webhookBindAddr = flag.String("webhook-addr", "0.0.0.0", "Addr to bind the webhook controller.")
webhookBindPort = flag.String("webhook-port", "9443", "Port to bind the webhook controller.")
)

type metricsCfg struct {
@@ -99,6 +105,9 @@ type cfg struct {
ns string
workers int

webhookCertPath, webhookKeyPath string
webhookBindAddr, webhookBindPort string

wg *sync.WaitGroup
stopCh <-chan struct{}

@@ -201,6 +210,11 @@ func main() {
ns: *ns,
workers: *workers,

webhookCertPath: *webhookCertPath,
webhookKeyPath: *webhookKeyPath,
webhookBindAddr: *webhookBindAddr,
webhookBindPort: *webhookBindPort,

wg: wg,
stopCh: stopCh,

@@ -355,6 +369,7 @@ func buildInitializers() map[string]initFunc {
controllers["capacity"] = startCapacityController
controllers["traffic"] = startTrafficController
controllers["janitor"] = startJanitorController
controllers["webhook"] = startWebhook
return controllers
}

@@ -507,6 +522,23 @@ func startTrafficController(cfg *cfg) (bool, error) {
return true, nil
}


func startJanitorController(cfg *cfg) (bool, error) {
enabled := cfg.enabledControllers["janitor"]
if !enabled {
@@ -0,0 +1,96 @@
@@ -0,0 +1,14 @@
@@ -0,0 +1,37 @@
@@ -0,0 +1,14 @@
@@ -0,0 +1,30 @@
Oops, something went wrong.

0 comments on commit 04d2fe3

Please sign in to comment.
You can’t perform that action at this time.