diff --git a/k8s/01_install_k8s.sh b/k8s/01_install_k8s.sh
index 9c73ce81..0e65ec55 100755
--- a/k8s/01_install_k8s.sh
+++ b/k8s/01_install_k8s.sh
@@ -81,9 +81,10 @@ popd
 
 echo "Installing InfluxDB"
 pushd influxdb
-helm install influxdb influxdata/influxdb -f ./values.yaml
+helm install influxdb bitnami/influxdb -f ./values.yaml
 popd
 
+
 echo "Installing Redis and Grafana dashboards"
 pushd testground-infra
 helm dep build
diff --git a/k8s/02_efs.sh b/k8s/02_efs.sh
index 1d59fa4c..421f4af9 100755
--- a/k8s/02_efs.sh
+++ b/k8s/02_efs.sh
@@ -12,12 +12,14 @@ trap 'err_report $LINENO' ERR
 
 START_TIME=`date +%s`
 
+CLUSTER_SPEC_TEMPLATE=$1
+
 my_dir="$(dirname "$0")"
 source "$my_dir/install-playbook/validation.sh"
 
 echo "Installing EFS..."
 
-vpcId=`aws ec2 describe-vpcs --region=$AWS_REGION --filters Name=tag:Name,Values=$NAME --output text | awk '/VPCS/ { print $8 }'`
+vpcId=`aws ec2 describe-vpcs --region=$AWS_REGION --filters Name=tag:Name,Values=$CLUSTER_NAME --output text | awk '/VPCS/ { print $8 }'`
 
 if [[ -z ${vpcId} ]]; then
     echo "Couldn't detect AWS VPC created by `kops`"
@@ -26,7 +28,7 @@ fi
 
 echo "Detected VPC: $vpcId"
 
-securityGroupId=`aws ec2 describe-security-groups --region=$AWS_REGION --output text | awk '/nodes.'$NAME'/ && /SECURITYGROUPS/ { print $6 };'`
+securityGroupId=`aws ec2 describe-security-groups --region=$AWS_REGION --output text | awk '/nodes.'$CLUSTER_NAME'/ && /SECURITYGROUPS/ { print $6 };'`
 
 if [[ -z ${securityGroupId} ]]; then
     echo "Couldn't detect AWS Security Group created by `kops`"
diff --git a/k8s/03_ebs.sh b/k8s/03_ebs.sh
index 23dee720..b7456da0 100755
--- a/k8s/03_ebs.sh
+++ b/k8s/03_ebs.sh
@@ -12,6 +12,8 @@ trap 'err_report $LINENO' ERR
 
 START_TIME=`date +%s`
 
+CLUSTER_SPEC_TEMPLATE=$1
+
 my_dir="$(dirname "$0")"
 source "$my_dir/install-playbook/validation.sh"
 
diff --git a/k8s/04_testground_daemon.sh b/k8s/04_testground_daemon.sh
index 61cf5b52..9012f188 100755
--- a/k8s/04_testground_daemon.sh
+++ b/k8s/04_testground_daemon.sh
@@ -12,6 +12,8 @@ trap 'err_report $LINENO' ERR
 
 START_TIME=`date +%s`
 
+CLUSTER_SPEC_TEMPLATE=$1
+
 my_dir="$(dirname "$0")"
 source "$my_dir/install-playbook/validation.sh"
 
diff --git a/k8s/cluster.yaml b/k8s/cluster.yaml
index 476112c7..0c1b8b47 100644
--- a/k8s/cluster.yaml
+++ b/k8s/cluster.yaml
@@ -2,8 +2,10 @@ apiVersion: kops.k8s.io/v1alpha2
 kind: Cluster
 metadata:
   creationTimestamp: null
-  name: ${NAME}
+  name: ${CLUSTER_NAME}
 spec:
+  docker:
+    skipInstall: true
   cloudLabels:
     Team: ${TEAM}
     Project: ${PROJECT}
@@ -27,7 +29,7 @@ spec:
     rbac: {}
   channel: stable
   cloudProvider: aws
-  configBase: ${KOPS_STATE_STORE}/${NAME}
+  configBase: ${KOPS_STATE_STORE}/${CLUSTER_NAME}
   docker:
     defaultUlimit:
     - "nofile=${ULIMIT_NOFILE}"
@@ -83,9 +85,9 @@ spec:
     kubeAPIBurst: 40
   kubernetesApiAccess:
   - 0.0.0.0/0
-  kubernetesVersion: 1.18.3
-  masterInternalName: api.internal.${NAME}
-  masterPublicName: api.${NAME}
+  kubernetesVersion: 1.18.10
+  masterInternalName: api.internal.${CLUSTER_NAME}
+  masterPublicName: api.${CLUSTER_NAME}
   networkCIDR: 172.20.0.0/16
   networking:
     flannel:
@@ -115,7 +117,7 @@ kind: InstanceGroup
 metadata:
   creationTimestamp: null
   labels:
-    kops.k8s.io/cluster: ${NAME}
+    kops.k8s.io/cluster: ${CLUSTER_NAME}
   name: master-${ZONE}
 spec:
   additionalUserData:
@@ -139,7 +141,7 @@ spec:
       net.ipv4.neigh.default.gc_thresh2 = 4096
      net.ipv4.neigh.default.gc_thresh3 = 32768
       EOT
-  image: 909427826938/testground_2020-06-09
+  image: 909427826938/testground_2020-10-30
   machineType: ${MASTER_NODE_TYPE}
   maxSize: 1
   minSize: 1
@@ -156,7 +158,7 @@ kind: InstanceGroup
 metadata:
   creationTimestamp: null
   labels:
-    kops.k8s.io/cluster: ${NAME}
+    kops.k8s.io/cluster: ${CLUSTER_NAME}
   name: nodes
 spec:
   additionalUserData:
@@ -182,7 +184,7 @@ spec:
       EOT
   cloudLabels:
     testground.node.role.plan: "true"
-  image: 909427826938/testground_2020-06-09
+  image: 909427826938/testground_2020-10-30
   machineType: ${WORKER_NODE_TYPE}
   maxSize: ${WORKER_NODES}
   minSize: ${WORKER_NODES}
@@ -201,7 +203,7 @@ kind: InstanceGroup
 metadata:
   creationTimestamp: null
   labels:
-    kops.k8s.io/cluster: ${NAME}
+    kops.k8s.io/cluster: ${CLUSTER_NAME}
   name: tginfra
 spec:
   additionalUserData:
@@ -227,7 +229,7 @@ spec:
       EOT
   cloudLabels:
     testground.node.role.infra: "true"
-  image: 909427826938/testground_2020-06-09
+  image: 909427826938/testground_2020-10-30
   machineType: c5.2xlarge
   maxSize: 2
   minSize: 2
diff --git a/k8s/delete_efs.sh b/k8s/delete_efs.sh
index 482de3a8..3322f4c0 100755
--- a/k8s/delete_efs.sh
+++ b/k8s/delete_efs.sh
@@ -11,19 +11,19 @@ err_report() {
 
 trap 'err_report $LINENO' ERR
 
-vpcId=`aws ec2 describe-vpcs --filters Name=tag:Name,Values=$NAME --output text | awk '/VPCS/ { print $8 }'`
+vpcId=`aws ec2 describe-vpcs --filters Name=tag:Name,Values=$CLUSTER_NAME --output text | awk '/VPCS/ { print $8 }'`
 
 if [[ -z ${vpcId} ]]; then
-    echo "Couldn't detect AWS VPC created by `kops`"
+    echo "Couldn't detect AWS VPC created by kops"
     exit 1
 fi
 
 echo "Detected VPC: $vpcId"
 
-securityGroupId=`aws ec2 describe-security-groups --output text | awk '/nodes.'$NAME'/ && /SECURITYGROUPS/ { print $6 };'`
+securityGroupId=`aws ec2 describe-security-groups --output text | awk '/nodes.'$CLUSTER_NAME'/ && /SECURITYGROUPS/ { print $6 };'`
 
 if [[ -z ${securityGroupId} ]]; then
-    echo "Couldn't detect AWS Security Group created by `kops`"
+    echo "Couldn't detect AWS Security Group created by kops"
     exit 1
 fi
 
@@ -41,7 +41,7 @@ pushd efs-terraform
 S3_BUCKET="${KOPS_STATE_STORE:5:100}"
 
 terraform init -backend-config=bucket=$S3_BUCKET \
-               -backend-config=key=tf-efs-$NAME \
+               -backend-config=key=${DEPLOYMENT_NAME}-efs \
                -backend-config=region=$AWS_REGION
 
 terraform destroy -var aws_region=$AWS_REGION -var fs_subnet_id_zone_a=$subnetIdZoneA -var fs_subnet_id_zone_b=$subnetIdZoneB -var fs_sg_id=$securityGroupId -auto-approve
diff --git a/k8s/delete_kops.sh b/k8s/delete_kops.sh
index a958de06..33310f6e 100755
--- a/k8s/delete_kops.sh
+++ b/k8s/delete_kops.sh
@@ -11,4 +11,4 @@ err_report() {
 
 trap 'err_report $LINENO' ERR
 
-kops delete cluster $NAME --yes
+kops delete cluster $CLUSTER_NAME --yes
diff --git a/k8s/influxdb/values.yaml b/k8s/influxdb/values.yaml
index 98e38c42..2c52abe3 100644
--- a/k8s/influxdb/values.yaml
+++ b/k8s/influxdb/values.yaml
@@ -1,238 +1,7 @@
-## influxdb image version
-## ref: https://hub.docker.com/r/library/influxdb/tags/
-image:
-  repository: "influxdb"
-  tag: "1.8-alpine"
-  pullPolicy: IfNotPresent
-  ## If specified, use these secrets to access the images
-  # pullSecrets:
-  #   - registry-secret
-
-
-serviceAccount:
-  create: true
-  name:
-  annotations: {}
-
-## Customize liveness, readiness and startup probes
-## ref: https://docs.influxdata.com/influxdb/v1.7/tools/api/#ping-http-endpoint
-## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
-##
-livenessProbe: {}
-  # initialDelaySeconds: 30
-  # timeoutSeconds: 5
-  # scheme: HTTP
-
-readinessProbe: {}
-  # initialDelaySeconds: 5
-  # timeoutSeconds: 1
-  # scheme: HTTP
-
-securityContext: {}
-  # runAsUser: 999
-  # runAsGroup: 999
-
-startupProbe:
-  enabled: false
-  # failureThreshold: 6
-  # periodSeconds: 5
-  # scheme: HTTP
-
-## Specify a service type
-## NodePort is default
-## ref: http://kubernetes.io/docs/user-guide/services/
-##
-service:
-  ## Add annotations to service
-  # annotations: {}
-  type: ClusterIP
-
-## Persist data to a persistent volume
-##
-persistence:
-  enabled: true
-  storageClass: "gp2"
-  annotations:
-  accessMode: ReadWriteOnce
-  size: 8Gi
-
-## Deploy InfluxDB Enterprise - License required
-## ref: https://www.influxdata.com/products/influxdb-enterprise/
-enterprise:
-  enabled: false
-  licensekey: {}
-  clusterSize: 4
-  meta:
-    image:
-      ## This image contains the enterprise meta node package for clustering.
-      ## It is meant to be used in conjunction with the influxdb:data package of the same version.
-      ## ref: https://hub.docker.com/_/influxdb
-      tag: meta
-    clusterSize: 3
-    ## seed is hashed and used as `internal-shared-secret` for Meta service.
-    seed: dead-beef-cafe-bae
-    ## Configure resource requests and limits
-    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
-    resources: {}
-    # resources:
-    #   requests:
-    #     memory: 512Mi
-    #     cpu: 2
-    #   limits:
-    #     memory: 1Gi
-    #     cpu: 4
-
-## Create default user through Kubernetes job
-## Defaults indicated below
-##
-setDefaultUser:
-  enabled: false
-
-  ## Image of the container used for job
-  ## Default: appropriate/curl:latest
-  ##
-  image: appropriate/curl:latest
-
-  ## Deadline for job so it does not retry forever.
-  ## Default: activeDeadline: 300
-  ##
-  activeDeadline: 300
-
-  ## Specify the number of retries before considering job as failed.
-  ## https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#pod-backoff-failure-policy
-  ##
-  backoffLimit: 6
-
-  ## Hook delete policy for helm.
-  ## Default: hookDeletePolicy: hook-succeeded
-  ##
-  hookDeletePolicy: hook-succeeded
-
-  ## Restart policy for job
-  ## Default: OnFailure
-  restartPolicy: OnFailure
-
-  user:
-
-    ## The user name
-    ## Default: "admin"
-    username: "admin"
-
-    ## User password
-    ## single quotes must be escaped (\')
-    ## Default: (Randomly generated 10 characters of AlphaNum)
-    # password:
-
-    ## The user name and password are obtained from an existing secret. The expected
-    ## keys are `influxdb-user` and `influxdb-password`.
-    ## If set, the username and password values above are ignored.
-    # existingSecret: influxdb-auth
-
-    ## User privileges
-    ## Default: "WITH ALL PRIVILEGES"
-    privileges: "WITH ALL PRIVILEGES"
-
-## Configure resource requests and limits
-## ref: http://kubernetes.io/docs/user-guide/compute-resources/
-resources:
-  limits:
-    memory: 2048Mi
-  requests:
-    cpu: 1000m
-    memory: 2048Mi
-
-# Annotations to be added to InfluxDB pods
-podAnnotations: {}
-
-ingress:
-  enabled: false
-  tls: false
-  # secretName: my-tls-cert # only needed if tls above is true
-  hostname: influxdb.foobar.com
-  annotations: {}
-    # kubernetes.io/ingress.class: "nginx"
-    # kubernetes.io/tls-acme: "true"
-
-## Use an alternate scheduler, e.g. "stork".
-## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
-##
-# schedulerName:
-
-## Node labels for pod assignment
-## Ref: https://kubernetes.io/docs/user-guide/node-selection/
-##
-nodeSelector:
-  testground.node.role.infra: "true"
-
-## Affinity for pod assignment
-## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
-##
-affinity: {}
-
-## Tolerations for pod assignment
-## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
-##
-tolerations: []
-# - key: "key"
-#   operator: "Equal|Exists"
-#   value: "value"
-#   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
-## The InfluxDB image uses several environment variables to automatically
-## configure certain parts of the server.
-## Ref: https://hub.docker.com/_/influxdb/
-env:
-  - name: INFLUXDB_DB
-    value: "testground"
-
-## InfluxDB configuration
-## ref: https://docs.influxdata.com/influxdb/v1.7/administration/config
-config:
-  reporting_disabled: false
-  rpc: {}
-  meta: {}
-  data: {}
-  coordinator: {}
-  retention: {}
-  shard_precreation: {}
-  monitor: {}
-  http: {}
-  logging: {}
-  subscriber: {}
-  graphite: {}
-  collectd: {}
-  opentsdb: {}
-  udp: {}
-  continuous_queries: {}
-  tls: {}
-
-# Allow executing custom init scripts
-#
-# If the container finds any files with the extensions .sh or .iql inside of the
-# /docker-entrypoint-initdb.d folder, it will execute them. The order they are
-# executed in is determined by the shell. This is usually alphabetical order.
-initScripts:
-  enabled: false
-  scripts:
-    init.iql: |+
-      CREATE DATABASE "telegraf" WITH DURATION 30d REPLICATION 1 NAME "rp_30d"
-
-backup:
-  enabled: false
-  schedule: "0 0 * * *"
-  annotations: {}
-  podAnnotations: {}
-
-  ## Google Cloud Storage
-  # gcs:
-  #   serviceAccountSecret: influxdb-backup-key
-  #   serviceAccountSecretKey: key.json
-  #   destination: gs://bucket/influxdb
-
-  ## Azure
-  ## Secret is expected to have connection string stored in `connection-string` field
-  ## Existing container will be used or private one withing storage account will be created.
-  # azure:
-  #   storageAccountSecret: influxdb-backup-azure-key
-  #   destination_container: influxdb-container
-  #   destination_path: ""
+authEnabled: false
+influxdb:
+  nodeSelector:
+    testground.node.role.infra: "true"
+  service:
+    type: ClusterIP
+    clusterIP: None
diff --git a/k8s/install-playbook/validation.sh b/k8s/install-playbook/validation.sh
index 57ff356b..f0d8a361 100644
--- a/k8s/install-playbook/validation.sh
+++ b/k8s/install-playbook/validation.sh
@@ -7,7 +7,7 @@ set -e
 # Validate required arguments
 if [ -z "$CLUSTER_SPEC_TEMPLATE" ]
 then
-    echo -e "Please provider cluster spec template file as argument. For example: \`./install.sh cluster.yaml\`"
+    echo -e "Please provide cluster spec template file as argument. For example: \`./01_install.sh cluster.yaml\`"
     exit 2
 fi
 if [ ! -f "$CLUSTER_SPEC_TEMPLATE" ]; then
-f "$CLUSTER_SPEC_TEMPLATE" ]; then diff --git a/k8s/kops-weave/flannel.yml b/k8s/kops-weave/flannel.yml index dfaef921..0115eaad 100644 --- a/k8s/kops-weave/flannel.yml +++ b/k8s/kops-weave/flannel.yml @@ -171,7 +171,7 @@ spec: priorityClassName: system-node-critical initContainers: - name: install-cni - image: quay.io/coreos/flannel:v0.11.0-amd64 + image: quay.io/coreos/flannel:v0.13.0-amd64 command: - cp args: @@ -185,7 +185,7 @@ spec: mountPath: /etc/kube-flannel/ containers: - name: kube-flannel - image: quay.io/coreos/flannel:v0.11.0-amd64 + image: quay.io/coreos/flannel:v0.13.0-amd64 command: - /opt/bin/flanneld args: diff --git a/k8s/kops-weave/weave.yml b/k8s/kops-weave/weave.yml index a315d151..715db5ef 100644 --- a/k8s/kops-weave/weave.yml +++ b/k8s/kops-weave/weave.yml @@ -194,8 +194,7 @@ items: fieldRef: apiVersion: v1 fieldPath: spec.nodeName - #image: 'docker.io/weaveworks/weave-kube:2.6.0' - image: 'iptestground/weave-kube:0.0.1' + image: 'iptestground/weave-kube:0.0.2-v2.7.0-63821434' readinessProbe: httpGet: host: 127.0.0.1 diff --git a/k8s/packer/Makefile b/k8s/packer/Makefile index 79a37d44..e76de25d 100644 --- a/k8s/packer/Makefile +++ b/k8s/packer/Makefile @@ -1,2 +1,2 @@ build-ami-image: - packer build -var 'source_ami=ami-093dda5e01f6102a8' -var 'aws_region=eu-central-1' testground-ami.json + packer build -var 'source_ami=ami-0ed4a9453b39ea8c1' -var 'aws_region=eu-west-2' testground-ami.json diff --git a/k8s/packer/README.md b/k8s/packer/README.md index 1bbc474c..86d2f557 100644 --- a/k8s/packer/README.md +++ b/k8s/packer/README.md @@ -1,15 +1,6 @@ ## Background -Testground AMI image is currently based on `k8s-1.17-debian-stretch-amd64-hvm-ebs-2020-01-17` - -You can get a specific AMI for a given region with: - -``` -aws ec2 describe-images --region eu-west-2 --output table \ - --owners 383156758163 \ - --query "sort_by(Images, &CreationDate)[*].[CreationDate,Name,ImageId]" \ - --filters "Name=name,Values=*-debian-stretch-*" -``` +Testground AMI image is currently based on `099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20201014` (since this is what kops v1.18.2 is using) --- diff --git a/k8s/packer/distribute-image.sh b/k8s/packer/distribute-image.sh index 5452ed8c..9cb6d6be 100755 --- a/k8s/packer/distribute-image.sh +++ b/k8s/packer/distribute-image.sh @@ -26,10 +26,10 @@ then exit 2 fi -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region eu-west-1 -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region eu-west-2 -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region us-east-1 -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region us-east-2 -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region us-west-1 -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region us-west-2 -aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-central-1 --region ap-southeast-1 +aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-west-2 --region eu-west-1 +aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-west-2 --region eu-central-1 +aws ec2 copy-image --name $NAME --source-image-id $SOURCE_IMAGE_ID --source-region eu-west-2 --region us-east-1 +aws ec2 copy-image 
diff --git a/k8s/packer/docker-pull-images.sh b/k8s/packer/docker-pull-images.sh
index dc015f46..c3deeece 100644
--- a/k8s/packer/docker-pull-images.sh
+++ b/k8s/packer/docker-pull-images.sh
@@ -1,34 +1,47 @@
 #!/bin/bash
-sudo docker pull kope/kops-controller:1.18.0-beta.1
-sudo docker pull kope/dns-controller:1.18.0-beta.1
-sudo docker pull kope/kube-apiserver-healthcheck:1.18.0-beta.1
-sudo docker pull kopeio/etcd-manager:3.0.20200531
-sudo docker pull k8s.gcr.io/kube-proxy:v1.18.3
-sudo docker pull k8s.gcr.io/kube-apiserver:v1.18.3
-sudo docker pull k8s.gcr.io/kube-scheduler:v1.18.3
-sudo docker pull k8s.gcr.io/kube-controller-manager:v1.18.3
+
+sudo apt update
+sudo apt install -y docker.io
+
+sudo docker pull bitnami/redis:5.0.8-debian-10-r39
+sudo docker pull bitnami/redis-exporter:1.5.2-debian-10-r27
+sudo docker pull busybox:1.31.1
+sudo docker pull busybox:1.31.1-glibc
+sudo docker pull grafana/grafana:7.0.3
+sudo docker pull influxdb:1.8-alpine
+sudo docker pull iptestground/curl-ssl:0.0.1
+sudo docker pull iptestground/goproxy:2.0.2
+sudo docker pull iptestground/weave-kube:0.0.2-v2.7.0-63821434
+sudo docker pull jettech/kube-webhook-certgen:v1.2.1
+sudo docker pull jettech/kube-webhook-certgen:v1.3.0
+sudo docker pull jimmidyson/configmap-reload:v0.3.0
+sudo docker pull k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.8.1
 sudo docker pull k8s.gcr.io/coredns:1.6.7
-sudo docker pull k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0
+sudo docker pull k8s.gcr.io/kube-apiserver:v1.18.10
+sudo docker pull k8s.gcr.io/kube-controller-manager:v1.18.10
+sudo docker pull k8s.gcr.io/kube-proxy:v1.18.10
+sudo docker pull k8s.gcr.io/kube-scheduler:v1.18.10
 sudo docker pull k8s.gcr.io/pause-amd64:3.2
-sudo docker pull quay.io/prometheus/node-exporter:v1.0.0
-sudo docker pull quay.io/prometheus/prometheus:v2.18.1
+sudo docker pull kiwigrid/k8s-sidecar:0.1.151
+sudo docker pull kope/dns-controller:1.18.2
+sudo docker pull kope/kops-controller:1.18.2
+sudo docker pull kope/kube-apiserver-healthcheck:1.18.2
+sudo docker pull kopeio/etcd-manager:3.0.20200531
+sudo docker pull quay.io/coreos/flannel:v0.11.0-amd64
+sudo docker pull quay.io/coreos/flannel:v0.13.0-amd64
+sudo docker pull quay.io/coreos/prometheus-config-reloader:v0.38.1
+sudo docker pull quay.io/coreos/prometheus-operator:v0.38.1
 sudo docker pull quay.io/external_storage/efs-provisioner:v2.4.0
 sudo docker pull quay.io/huawei-cni-genie/genie-admission-controller:1382
 sudo docker pull quay.io/huawei-cni-genie/genie-plugin:1382
-sudo docker pull quay.io/coreos/flannel:v0.11.0-amd64
-sudo docker pull grafana/grafana:7.0.3
-sudo docker pull busybox:1.31.1
-sudo docker pull kiwigrid/k8s-sidecar:0.1.151
-sudo docker pull influxdb:1.8-alpine
-sudo docker pull bitnami/redis:5.0.8-debian-10-r39
-sudo docker pull docker.io/bitnami/redis-exporter:1.5.2-debian-10-r27
-sudo docker pull iptestground/weave-kube:0.0.1
-sudo docker pull iptestground/curl-ssl:0.0.1
-sudo docker pull iptestground/goproxy:2.0.2
+sudo docker pull quay.io/jetstack/cert-manager-webhook:v0.16.0
+sudo docker pull quay.io/prometheus/node-exporter:v1.0.0
+sudo docker pull quay.io/prometheus/prometheus:v2.18.2
+sudo docker pull squareup/ghostunnel:v1.5.2
 
 TEMPDIR=`mktemp -d`
 pushd $TEMPDIR
-wget https://kubeupv2.s3.amazonaws.com/kops/1.8.1/images/protokube.tar.gz
+wget https://kubeupv2.s3.amazonaws.com/kops/1.18.2/images/protokube.tar.gz
 sudo docker load < protokube.tar.gz
 popd
diff --git a/k8s/packer/testground-ami.json b/k8s/packer/testground-ami.json
index 7d29e370..e7ef79e0 100644
--- a/k8s/packer/testground-ami.json
+++ b/k8s/packer/testground-ami.json
@@ -12,7 +12,7 @@
     "region": "{{user `aws_region`}}",
     "source_ami": "{{user `source_ami`}}",
     "instance_type": "m4.xlarge",
-    "ssh_username": "admin",
+    "ssh_username": "ubuntu",
     "ami_name": "testground_{{isotime \"2006-01-02\"}}"
   }],
   "provisioners": [
diff --git a/k8s/testground-daemon/role-binding.yml b/k8s/testground-daemon/role-binding.yml
index 43f9a474..2ce84fca 100644
--- a/k8s/testground-daemon/role-binding.yml
+++ b/k8s/testground-daemon/role-binding.yml
@@ -9,7 +9,7 @@ rules:
   # at the HTTP level, the name of the resource for accessing Secret
   # objects is "secrets"
   resources: ["pods", "nodes", "pods/log", "pods/exec", "events", "persistentvolumeclaims", "persistentvolumes"]
-  verbs: ["get", "watch", "list", "edit", "create", "delete"]
+  verbs: ["get", "watch", "list", "edit", "create", "delete", "deletecollection"]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
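
Editor's note (not part of the patch): with this change the per-step scripts read the cluster spec template from their first argument (CLUSTER_SPEC_TEMPLATE=$1) and identify the cluster via CLUSTER_NAME instead of NAME, with DEPLOYMENT_NAME feeding the Terraform EFS state key. A minimal invocation sketch follows; the exported values are illustrative assumptions, only the variable names and script arguments come from the diff.

    # assumed example values -- substitute your own deployment's settings
    export CLUSTER_NAME=my-testground.k8s.local
    export DEPLOYMENT_NAME=my-testground
    export AWS_REGION=eu-west-2
    export KOPS_STATE_STORE=s3://my-kops-state-bucket

    # each script now takes the cluster spec template as $1
    ./02_efs.sh ./cluster.yaml
    ./03_ebs.sh ./cluster.yaml
    ./04_testground_daemon.sh ./cluster.yaml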
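
Editor's note (not part of the patch): 01_install_k8s.sh now installs InfluxDB from the Bitnami chart, so Helm must know about that repository before the install step runs. A short sketch, assuming Helm 3 and the public Bitnami chart index:

    # register the Bitnami repo once and refresh the local chart index
    helm repo add bitnami https://charts.bitnami.com/bitnami
    helm repo update

    # the slimmed-down k8s/influxdb/values.yaml in this patch is written against
    # the bitnami/influxdb value layout (authEnabled, influxdb.nodeSelector, influxdb.service)
    helm install influxdb bitnami/influxdb -f ./values.yaml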