#!/usr/bin/env bats
# End-to-end tests for the Vault provider of the Secrets Store CSI driver.
# Requires a running cluster reachable via kubectl, plus helm.
# helpers supplies wait_for_process, assert_success/assert_failure,
# archive_info, compare_owner_count and check_secret_deleted.
load helpers
# Directory holding the YAML fixtures applied by these tests.
BATS_TESTS_DIR=test/bats/tests/vault
# Upper bound (seconds) and poll interval (seconds) used with wait_for_process.
WAIT_TIME=120
SLEEP_TIME=1
# Expected label/annotation values on the synced K8s secret; CI may override
# them via the environment.
export LABEL_VALUE=${LABEL_VALUE:-"test"}
export ANNOTATION_VALUE=${ANNOTATION_VALUE:-"app=test"}
@test "install vault provider" {
  # namespace the vault server and csi provider run in
  kubectl create ns vault

  # install the vault server and csi provider via the official helm chart
  # NOTE(review): the original comment claimed this is "pinned to a fixed
  # version (1.7.0)", but no --version flag is passed, so the latest chart
  # is installed — confirm whether the chart version should be pinned.
  helm repo add hashicorp https://helm.releases.hashicorp.com
  helm repo update
  helm install vault hashicorp/vault --namespace=vault \
    --set "server.dev.enabled=true" \
    --set "injector.enabled=false" \
    --set "csi.enabled=true"

  # wait for the vault and vault-csi-provider pods to be Ready
  # (use the shared WAIT_TIME instead of a duplicated hard-coded 120s)
  kubectl wait --for=condition=Ready --timeout=${WAIT_TIME}s pods --all -n vault
}
@test "setup vault" {
  # seed the kv secrets that subsequent tests read back
  kubectl exec vault-0 --namespace=vault -- vault kv put secret/foo bar=hello
  kubectl exec vault-0 --namespace=vault -- vault kv put secret/foo1 bar1=hello1

  # enable kubernetes authentication
  kubectl exec vault-0 --namespace=vault -- vault auth enable kubernetes

  # Split declaration and assignment so a failed command substitution is not
  # masked by `local` always returning 0 (shellcheck SC2155).
  local token_reviewer_jwt
  local kubernetes_service_ip
  token_reviewer_jwt="$(kubectl exec vault-0 --namespace=vault -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)"
  kubernetes_service_ip="$(kubectl get svc kubernetes -o go-template="{{ .spec.clusterIP }}")"

  # configure kubernetes auth using the service-account token from the vault pod
  kubectl exec -i vault-0 --namespace=vault -- vault write auth/kubernetes/config \
    issuer="https://kubernetes.default.svc.cluster.local" \
    token_reviewer_jwt="${token_reviewer_jwt}" \
    kubernetes_host="https://${kubernetes_service_ip}:443" \
    kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt

  # policy granting read access to the secrets created above
  kubectl exec -i vault-0 --namespace=vault -- vault policy write csi -<<EOF
path "secret/data/*" {
  capabilities = ["read"]
}
EOF

  # role binding the default service account in the test namespaces to the policy
  kubectl exec -i vault-0 --namespace=vault -- vault write auth/kubernetes/role/csi \
    bound_service_account_names=default \
    bound_service_account_namespaces=default,test-ns,negative-test-ns \
    policies=csi \
    ttl=20m
}
@test "deploy vault secretproviderclass crd" {
  # apply the SecretProviderClass fixture; quote expansions (SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/vault_v1_secretproviderclass.yaml"
  kubectl wait --for condition=established --timeout=60s crd/secretproviderclasses.secrets-store.csi.x-k8s.io

  # poll until the vault-foo SPC object is visible
  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/vault-foo -o yaml | grep vault"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"
}
@test "CSI inline volume test with pod portability" {
  # deploy a pod mounting the vault SPC as a CSI inline volume (quote path, SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/pod-vault-inline-volume-secretproviderclass.yaml"
  # wait for pod to be running
  kubectl wait --for=condition=Ready --timeout=60s pod/secrets-store-inline
  run kubectl get pod/secrets-store-inline
  assert_success
}
@test "CSI inline volume test with pod portability - read vault secret from pod" {
  # Each mounted file must hold the value seeded in the "setup vault" test.
  local got
  got=$(kubectl exec secrets-store-inline -- cat /mnt/secrets-store/bar)
  [[ "$got" == "hello" ]]
  got=$(kubectl exec secrets-store-inline -- cat /mnt/secrets-store/bar1)
  [[ "$got" == "hello1" ]]
}
@test "CSI inline volume test with pod portability - rotation succeeds" {
  # seed the initial secret value
  kubectl exec vault-0 --namespace=vault -- vault kv put secret/rotation foo=start

  # deploy a pod mounting secret/rotation (quote fixture path, SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/pod-vault-rotation.yaml"
  kubectl wait --for=condition=Ready --timeout=60s pod/secrets-store-rotation
  run kubectl get pod/secrets-store-rotation
  assert_success

  # verify the starting value was mounted
  result=$(kubectl exec secrets-store-rotation -- cat /mnt/secrets-store/foo)
  [[ "$result" == "start" ]]

  # rotate the secret in vault, then wait for the driver to re-sync the
  # mounted file (fixed 60s wait; presumably matches the driver's rotation
  # poll interval — confirm against the driver configuration)
  kubectl exec vault-0 --namespace=vault -- vault kv put secret/rotation foo=rotated
  sleep 60

  # verify the mounted file now holds the rotated value
  result=$(kubectl exec secrets-store-rotation -- cat /mnt/secrets-store/foo)
  [[ "$result" == "rotated" ]]
}
@test "CSI inline volume test with pod portability - unmount succeeds" {
  # On Linux a failure to unmount the tmpfs will block the pod from being
  # deleted.
  run kubectl delete pod secrets-store-inline
  assert_success
  run kubectl wait --for=delete --timeout=${WAIT_TIME}s pod/secrets-store-inline
  assert_success
  # Sleep to allow time for logs to propagate.
  sleep 10
  # save debug information to archive in case of failure
  archive_info
  # On Windows, the failed unmount calls from: https://github.com/kubernetes-sigs/secrets-store-csi-driver/pull/545
  # do not prevent the pod from being deleted. Search through the driver logs
  # for the error.
  # assert_failure: the grep must find NO "failed to clean and unmount"
  # error lines in the driver logs — a grep miss (exit 1) means success here.
  run bash -c "kubectl logs -l app=secrets-store-csi-driver --tail -1 -c secrets-store -n kube-system | grep '^E.*failed to clean and unmount target path.*$'"
  assert_failure
}
@test "Sync with K8s secrets - create deployment" {
  # SPC that additionally syncs the vault secret into a K8s secret (quote paths, SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/vault_synck8s_v1_secretproviderclass.yaml"
  kubectl wait --for condition=established --timeout=60s crd/secretproviderclasses.secrets-store.csi.x-k8s.io

  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/vault-foo-sync -o yaml | grep vault"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"

  # two deployments reference the same SPC, so the synced secret ends up
  # with two ownerReferences (checked in the follow-up test)
  run kubectl apply -f "$BATS_TESTS_DIR/deployment-synck8s.yaml"
  assert_success
  run kubectl apply -f "$BATS_TESTS_DIR/deployment-two-synck8s.yaml"
  assert_success

  # use the shared WAIT_TIME instead of a duplicated hard-coded 120s
  kubectl wait --for=condition=Ready --timeout=${WAIT_TIME}s pod -l app=busybox
}
@test "Sync with K8s secrets - read secret from pod, read K8s secret, read env var, check secret ownerReferences with multiple owners" {
  POD=$(kubectl get pod -l app=busybox -o jsonpath="{.items[0].metadata.name}")

  # files mounted from vault (quote "$POD", SC2086)
  result=$(kubectl exec "$POD" -- cat /mnt/secrets-store/bar)
  [[ "$result" == "hello" ]]
  result=$(kubectl exec "$POD" -- cat /mnt/secrets-store/bar1)
  [[ "$result" == "hello1" ]]
  result=$(kubectl exec "$POD" -- cat /mnt/secrets-store/nested/bar)
  [[ "$result" == "hello" ]]

  # contents of the synced K8s secret
  result=$(kubectl get secret foosecret -o jsonpath="{.data.pwd}" | base64 -d)
  [[ "$result" == "hello" ]]
  result=$(kubectl get secret foosecret -o jsonpath="{.data.nested}" | base64 -d)
  [[ "$result" == "hello" ]]

  # env var projected from the synced secret; strip CR/LF for Windows nodes
  result=$(kubectl exec "$POD" -- printenv | grep SECRET_USERNAME | awk -F"=" '{ print $2 }' | tr -d '\r\n')
  [[ "$result" == "hello1" ]]

  # expected label/annotation values on the synced secret
  # (${result//$'\r'} drops a possible trailing CR from Windows output)
  result=$(kubectl get secret foosecret -o jsonpath="{.metadata.labels.environment}")
  [[ "${result//$'\r'}" == "${LABEL_VALUE}" ]]
  result=$(kubectl get secret foosecret -o jsonpath="{.metadata.annotations.kubed\.appscode\.com\/sync}")
  [[ "${result//$'\r'}" == "${ANNOTATION_VALUE}" ]]
  result=$(kubectl get secret foosecret -o jsonpath="{.metadata.labels.secrets-store\.csi\.k8s\.io/managed}")
  [[ "${result//$'\r'}" == "true" ]]

  # both deployments must own the synced secret
  run wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "compare_owner_count foosecret default 2"
  assert_success
}
@test "Sync with K8s secrets - delete deployment, check secret is deleted" {
  # deleting one deployment drops one ownerReference (quote paths, SC2086)
  run kubectl delete -f "$BATS_TESTS_DIR/deployment-synck8s.yaml"
  assert_success
  run wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "compare_owner_count foosecret default 1"
  assert_success

  # deleting the last owner must garbage-collect the synced secret
  run kubectl delete -f "$BATS_TESTS_DIR/deployment-two-synck8s.yaml"
  assert_success
  run wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "check_secret_deleted foosecret default"
  assert_success

  run kubectl delete -f "$BATS_TESTS_DIR/vault_synck8s_v1_secretproviderclass.yaml"
  assert_success
}
@test "Test Namespaced scope SecretProviderClass - create deployment" {
  kubectl create ns test-ns
  # fixture creates the SPC in both default and test-ns (quote paths, SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/vault_v1_secretproviderclass_ns.yaml"
  kubectl wait --for condition=established --timeout=60s crd/secretproviderclasses.secrets-store.csi.x-k8s.io

  # verify the SPC object exists in both namespaces
  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/vault-foo-sync -o yaml | grep vault"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"
  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/vault-foo-sync -n test-ns -o yaml | grep vault"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"

  kubectl apply -n test-ns -f "$BATS_TESTS_DIR/deployment-synck8s.yaml"
  kubectl wait --for=condition=Ready --timeout=90s pod -l app=busybox -n test-ns
}
@test "Test Namespaced scope SecretProviderClass - Sync with K8s secrets - read secret from pod, read K8s secret, read env var, check secret ownerReferences" {
  POD=$(kubectl get pod -l app=busybox -n test-ns -o jsonpath="{.items[0].metadata.name}")

  # files mounted from vault in the test-ns pod (quote "$POD", SC2086)
  result=$(kubectl exec -n test-ns "$POD" -- cat /mnt/secrets-store/bar)
  [[ "$result" == "hello" ]]
  result=$(kubectl exec -n test-ns "$POD" -- cat /mnt/secrets-store/bar1)
  [[ "$result" == "hello1" ]]

  # synced K8s secret in test-ns
  result=$(kubectl get secret foosecret -n test-ns -o jsonpath="{.data.pwd}" | base64 -d)
  [[ "$result" == "hello" ]]

  # env var projected from the synced secret; strip CR/LF for Windows nodes
  result=$(kubectl exec -n test-ns "$POD" -- printenv | grep SECRET_USERNAME | awk -F"=" '{ print $2 }' | tr -d '\r\n')
  [[ "$result" == "hello1" ]]

  # the single deployment owns the synced secret
  run wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "compare_owner_count foosecret test-ns 1"
  assert_success
}
@test "Test Namespaced scope SecretProviderClass - Sync with K8s secrets - delete deployment, check secret deleted" {
  # deleting the only owner must garbage-collect the synced secret (quote paths, SC2086)
  run kubectl delete -f "$BATS_TESTS_DIR/deployment-synck8s.yaml" -n test-ns
  assert_success
  run wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "check_secret_deleted foosecret test-ns"
  assert_success
}
@test "Test Namespaced scope SecretProviderClass - Should fail when no secret provider class in same namespace" {
  kubectl create ns negative-test-ns
  # deploy into a namespace that has NO SecretProviderClass (quote path, SC2086)
  kubectl apply -n negative-test-ns -f "$BATS_TESTS_DIR/deployment-synck8s.yaml"

  POD=$(kubectl get pod -l app=busybox -n negative-test-ns -o jsonpath="{.items[0].metadata.name}")
  # the volume mount must fail with a "secretproviderclass not found" event
  cmd="kubectl describe pod $POD -n negative-test-ns | grep 'FailedMount.*failed to get secretproviderclass negative-test-ns/vault-foo-sync.*not found'"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"

  # cleanup
  run kubectl delete -f "$BATS_TESTS_DIR/deployment-synck8s.yaml" -n negative-test-ns
  assert_success
  run kubectl delete ns negative-test-ns
  assert_success
}
@test "deploy multiple vault secretproviderclass crd" {
  # fixture defines two SPCs: vault-foo-sync-0 and vault-foo-sync-1 (quote path, SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/vault_v1_multiple_secretproviderclass.yaml"
  cmd="kubectl wait --for condition=established --timeout=60s crd/secretproviderclasses.secrets-store.csi.x-k8s.io"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"

  # poll until both SPC objects are visible
  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/vault-foo-sync-0 -o yaml | grep vault-foo-sync-0"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"
  cmd="kubectl get secretproviderclasses.secrets-store.csi.x-k8s.io/vault-foo-sync-1 -o yaml | grep vault-foo-sync-1"
  wait_for_process "$WAIT_TIME" "$SLEEP_TIME" "$cmd"
}
@test "deploy pod with multiple secret provider class" {
  # pod mounts both SPCs as two separate CSI inline volumes (quote path, SC2086)
  kubectl apply -f "$BATS_TESTS_DIR/pod-vault-inline-volume-multiple-spc.yaml"
  kubectl wait --for=condition=Ready --timeout=90s pod/secrets-store-inline-multiple-crd
  run kubectl get pod/secrets-store-inline-multiple-crd
  assert_success
}
@test "CSI inline volume test with multiple secret provider class" {
  # Both SPC volumes (suffix 0 and 1) must expose the same seeded values,
  # each with its own synced K8s secret, env var and single owner.
  local idx val
  for idx in 0 1; do
    val=$(kubectl exec secrets-store-inline-multiple-crd -- cat "/mnt/secrets-store-$idx/bar")
    [[ "$val" == "hello" ]]
    val=$(kubectl exec secrets-store-inline-multiple-crd -- cat "/mnt/secrets-store-$idx/bar1")
    [[ "$val" == "hello1" ]]
    val=$(kubectl get secret "foosecret-$idx" -o jsonpath="{.data.pwd}" | base64 -d)
    [[ "$val" == "hello" ]]
    # env var projected from the synced secret; strip CR/LF for Windows nodes
    val=$(kubectl exec secrets-store-inline-multiple-crd -- printenv | grep "SECRET_USERNAME_$idx" | awk -F"=" '{ print $2 }' | tr -d '\r\n')
    [[ "$val" == "hello1" ]]
    run wait_for_process $WAIT_TIME $SLEEP_TIME "compare_owner_count foosecret-$idx default 1"
    assert_success
  done
}
# Runs once after all tests in this file: archive debug information for CI
# triage. `|| true` keeps a failure of the archiving step itself from
# failing the suite.
teardown_file() {
  archive_info || true
}