
Network policy template #62

Merged
merged 18 commits into from Oct 21, 2023
13 changes: 11 additions & 2 deletions .github/workflows/ci.yml
@@ -145,7 +145,7 @@ jobs:
      - uses: actions/checkout@v3
      - uses: nolar/setup-k3d-k3s@v1
        with:
          version: v1.26
          version: v1.27
          k3d-name: kube
          k3d-args: "--no-lb --no-rollback --k3s-arg --disable=traefik,servicelb,metrics-server@server:*"
      - run: kubectl apply -f yaml/crd.yaml
@@ -158,11 +158,20 @@ jobs:
          path: /tmp
      - name: Load docker image from tarball
        run: docker load --input /tmp/image.tar
      - run: helm template charts/doc-controller --set version="latest" | kubectl apply -f -
      - name: helm template | kubectl apply
        run: |
          apiserver="$(kubectl get endpoints kubernetes -ojson | jq '.subsets[0].addresses[0].ip' -r)"
          helm template charts/doc-controller \
            --set version=latest \
            --set networkPolicy.enabled=true \
            --set networkPolicy.apiserver.0=${apiserver}/32 \
            | kubectl apply -f -
      - run: kubectl wait --for=condition=available deploy/doc-controller --timeout=30s
      - run: kubectl apply -f yaml/instance-samuel.yaml
      - run: sleep 2 # TODO: add condition on status and wait for it instead
      # verify reconcile actions have happened
      - run: kubectl get netpol doc-controller -oyaml
      - run: kubectl logs deploy/doc-controller
      - run: kubectl get event --field-selector "involvedObject.kind=Document,involvedObject.name=samuel" | grep "HideRequested"
      - run: kubectl get doc -oyaml | grep -A1 finalizers | grep documents.kube.rs
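
The apiserver lookup in the new CI step pins the policy's apiserver egress to a single /32 instead of the chart's wide-open default; the queried address becomes the cidr of the rendered NetworkPolicy. A rough local equivalent of that step (the address is illustrative):

    $ kubectl get endpoints kubernetes -ojson | jq '.subsets[0].addresses[0].ip' -r
    172.18.0.3
    $ helm template charts/doc-controller \
        --set networkPolicy.enabled=true \
        --set networkPolicy.apiserver.0=172.18.0.3/32 \
        --show-only templates/networkpolicy.yaml | grep cidr
            cidr: 172.18.0.3/32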

2 changes: 1 addition & 1 deletion charts/doc-controller/templates/deployment.yaml
@@ -45,7 +45,7 @@ spec:
          value: {{ .Values.logging.env_filter }}
        {{- if .Values.tracing.enabled }}
        - name: OPENTELEMETRY_ENDPOINT_URL
          value: {{ .Values.tracing.endpoint }}
          value: https://{{ .Values.tracing.service }}.{{ .Values.tracing.namespace }}.svc.cluster.local:{{ .Values.tracing.port }}
        {{- end }}
        {{- with .Values.env }}
        {{- toYaml . | nindent 8 }}
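
With the chart's default tracing values further down (service promstack-tempo, namespace monitoring, port 4317), the templated entry should render back to the endpoint that used to be hard-coded, roughly:

    - name: OPENTELEMETRY_ENDPOINT_URL
      value: https://promstack-tempo.monitoring.svc.cluster.local:4317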
69 changes: 69 additions & 0 deletions charts/doc-controller/templates/networkpolicy.yaml
@@ -0,0 +1,69 @@
{{- if .Values.networkPolicy.enabled }}
---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: {{ include "controller.fullname" . }}
  namespace: {{ .Values.namespace }}
  labels:
    {{- include "controller.labels" . | nindent 4 }}
spec:
  podSelector:
    matchLabels:
      {{- include "controller.selectorLabels" . | nindent 6 }}
  policyTypes:
  - Ingress
  - Egress
  egress:
  {{- if .Values.tracing.enabled }}
  # pushing tracing spans to a collector
  - to:
    - namespaceSelector:
        matchLabels:
          name: {{ .Values.tracing.namespace }}
    ports:
    - port: {{ .Values.tracing.port }}
      protocol: TCP
  {{- end }}

  # Kubernetes apiserver access
  - to:
    - ipBlock:
        {{- range .Values.networkPolicy.apiserver }}
        cidr: {{ . }}
        {{- end }}
    ports:
    - port: 443
      protocol: TCP
    - port: 6443
      protocol: TCP

  {{- if .Values.networkPolicy.dns }}
  # DNS egress
  - to:
    - podSelector:
        matchLabels:
          k8s-app: kube-dns
    ports:
    - port: 53
      protocol: UDP
  {{- end }}

  ingress:
  {{- with .Values.networkPolicy.prometheus }}
  {{- if .enabled }}
  # prometheus metrics scraping support
  - from:
    - namespaceSelector:
        matchLabels:
          name: {{ .namespace }}
      podSelector:
        matchLabels:
          app: {{ .app }}
    ports:
    - port: {{ .port }}
      protocol: TCP
  {{- end }}
  {{- end }}

{{- end }}
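
If tracing is enabled alongside the policy, the first egress rule opens the collector port towards the tracing namespace; with the chart's default tracing values (namespace monitoring, port 4317) that block would render roughly as:

    egress:
    # pushing tracing spans to a collector
    - to:
      - namespaceSelector:
          matchLabels:
            name: monitoring
      ports:
      - port: 4317
        protocol: TCP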
6 changes: 4 additions & 2 deletions charts/doc-controller/templates/rbac.yaml
@@ -1,3 +1,4 @@
{{- if .Values.serviceAccount.create }}
---
# Scoped service account
apiVersion: v1
@@ -12,6 +13,7 @@ metadata:
  {{- end }}
  namespace: {{ .Values.namespace }}
automountServiceAccountToken: true
{{- end }}

---
# Access for the service account
@@ -21,8 +23,8 @@ metadata:
  name: {{ include "controller.fullname" . }}
rules:
- apiGroups: ["kube.rs"]
  resources: ["documents", "documents/status"]
  verbs: ["get", "list", "watch", "patch"]
  resources: ["documents", "documents/status", "documents/finalizers"]
  verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["events.k8s.io"]
  resources: ["events"]
  verbs: ["create"]
2 changes: 1 addition & 1 deletion charts/doc-controller/templates/servicemonitor.yaml
@@ -34,7 +34,7 @@ spec:
  jobLabel: {{ include "controller.fullname" . }}
  selector:
    matchLabels:
      app: {{ include "controller.fullname" . }}
      {{- include "controller.selectorLabels" . | nindent 6 }}
  namespaceSelector:
    matchNames:
    - {{ .Values.namespace }}
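
Switching the ServiceMonitor selector to the shared controller.selectorLabels helper keeps it aligned with the labels the chart already puts on the pods; judging by the rendered manifest further down, the helper resolves to the same single label, i.e. roughly:

    selector:
      matchLabels:
        app: doc-controller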
25 changes: 22 additions & 3 deletions charts/doc-controller/values.yaml
@@ -10,6 +10,7 @@ image:
imagePullSecrets: []

serviceAccount:
  create: true
  annotations: {}
podAnnotations: {}

@@ -23,10 +24,28 @@ securityContext: {}
  # runAsNonRoot: true
  # runAsUser: 1000

# Enable the feature-flagged opentelemetry trace layer pushing over grpc
# Configure the gRPC opentelemetry push url
tracing:
  enabled: false # prefixes tag with otel
  endpoint: "https://promstack-tempo.monitoring.svc.cluster.local:4317"
  # Use the telemetry built image and inject OPENTELEMETRY_ENDPOINT_URL
  enabled: false
  # namespace of the collector
  namespace: monitoring
  # collector service name
  service: promstack-tempo
  # collector port for OTLP gRPC
  port: 4317

networkPolicy:
  enabled: true
  dns: true
  # apiserver access: scope this down to your cluster; the addresses come from "kubectl get endpoints kubernetes -n default"
  apiserver:
  - "0.0.0.0/0" # extremely wide-open egress on ports 443 + 6443
  prometheus:
    enabled: true
    namespace: monitoring
    app: prometheus
    port: http

logging:
  env_filter: info,kube=debug,controller=debug
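
Because the shipped apiserver default is 0.0.0.0/0, a real install would normally narrow it, either with --set as the CI job does or through a values override; a minimal sketch (file name and CIDR illustrative):

    # my-values.yaml
    networkPolicy:
      enabled: true
      apiserver:
      - "10.43.0.1/32" # from: kubectl get endpoints kubernetes -n default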
54 changes: 52 additions & 2 deletions yaml/deployment.yaml
@@ -1,4 +1,54 @@
---
# Source: doc-controller/templates/networkpolicy.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: doc-controller
  namespace: default
  labels:
    app: doc-controller
    app.kubernetes.io/name: doc-controller
    app.kubernetes.io/version: "0.12.10"
spec:
  podSelector:
    matchLabels:
      app: doc-controller
  policyTypes:
  - Ingress
  - Egress
  egress:

  # Kubernetes apiserver access
  - to:
    - ipBlock:
        cidr: 0.0.0.0/0
    ports:
    - port: 443
      protocol: TCP
    - port: 6443
      protocol: TCP
  # DNS egress
  - to:
    - podSelector:
        matchLabels:
          k8s-app: kube-dns
    ports:
    - port: 53
      protocol: UDP

  ingress:
  # prometheus metrics scraping support
  - from:
    - namespaceSelector:
        matchLabels:
          name: monitoring
      podSelector:
        matchLabels:
          app: prometheus
    ports:
    - port: http
      protocol: TCP
---
# Source: doc-controller/templates/rbac.yaml
# Scoped service account
apiVersion: v1
@@ -20,8 +70,8 @@ metadata:
  name: doc-controller
rules:
- apiGroups: ["kube.rs"]
  resources: ["documents", "documents/status"]
  verbs: ["get", "list", "watch", "patch"]
  resources: ["documents", "documents/status", "documents/finalizers"]
  verbs: ["get", "list", "watch", "patch", "update"]
- apiGroups: ["events.k8s.io"]
  resources: ["events"]
  verbs: ["create"]