From 3b21e96859d47300ade5e362146381f7145ff998 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Tue, 8 Feb 2022 10:22:07 -0300 Subject: [PATCH 01/17] Creating clusters using vcluster --- docker-compose.yaml | 56 ++ kubeconfig.yaml | 19 + test.dockerfile | 21 + test/acceptance/vcluster/acceptance.go | 28 + .../vcluster/vcluster_suite_test.go | 19 + test/vcluster/manifests/manifests.yaml.tpl | 214 ++++++ .../manifests/nginx-ingress-deploy.yaml | 685 ++++++++++++++++++ .../manifests/vcluster-ingress.yaml.tpl | 22 + .../manifests/vcluster-values.yaml.tpl | 5 + test/vcluster/vcluster.go | 291 ++++++++ 10 files changed, 1360 insertions(+) create mode 100644 docker-compose.yaml create mode 100644 kubeconfig.yaml create mode 100644 test.dockerfile create mode 100644 test/acceptance/vcluster/acceptance.go create mode 100644 test/acceptance/vcluster/vcluster_suite_test.go create mode 100644 test/vcluster/manifests/manifests.yaml.tpl create mode 100644 test/vcluster/manifests/nginx-ingress-deploy.yaml create mode 100644 test/vcluster/manifests/vcluster-ingress.yaml.tpl create mode 100644 test/vcluster/manifests/vcluster-values.yaml.tpl create mode 100644 test/vcluster/vcluster.go diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 00000000000..5d9d6f68e7c --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,56 @@ +# to run define K3S_TOKEN, K3S_VERSION is optional, eg: +# K3S_TOKEN=${RANDOM}${RANDOM}${RANDOM} docker-compose up + +version: '3' +services: + k3s: + image: "rancher/k3s:${K3S_VERSION:-latest}" + command: server --no-deploy traefik --tls-san k3s + networks: + cluster: + ipv4_address: 172.10.0.150 + tmpfs: + - /run + - /var/run + ulimits: + nproc: 65535 + nofile: + soft: 65535 + hard: 65535 + privileged: true + restart: always + environment: + - K3S_TOKEN=foo + - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml + - K3S_KUBECONFIG_MODE=666 + volumes: + - k3s-server:/var/lib/rancher/k3s + # This is just so that we get the kubeconfig file out + - .:/output + ports: + - 6443:6443 # Kubernetes API Server + - 80:80 # Ingress controller port 80 + - 443:443 # Ingress controller port 443 + + test: + build: + context: . 
+ dockerfile: test.dockerfile + command: go test github.com/weaveworks/weave-gitops/test/acceptance/vcluster + environment: + - KUBECONFIG=/app/kubeconfig.yaml + volumes: + - .:/app + working_dir: /app + depends_on: + - k3s + networks: + - cluster +volumes: + k3s-server: {} + +networks: + cluster: + ipam: + config: + - subnet: 172.10.0.0/24 diff --git a/kubeconfig.yaml b/kubeconfig.yaml new file mode 100644 index 00000000000..b2721211377 --- /dev/null +++ b/kubeconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyTkRNNU9UVXlNRE13SGhjTk1qSXdNakEwTVRjeU1EQXpXaGNOTXpJd01qQXlNVGN5TURBegpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyTkRNNU9UVXlNRE13V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTOWNuenVrdis0OUZwdDl4djQyQzVWNGZseFhjTFd1a1VOMUJiM0tKaE4KUEhGVWNxNktZdFJDZnNZQmkrSXlWalNqcnIzM05ZRWNEb2JtYlJtWW12Z0pvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVdPNUxwVWpWSjJkM0cvaDFWVXdPCmNuR2ZsZTh3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBOYnBXWGl6U3pEeEMwWHE1WWtKVThBR3NvM1ZpWU0KL3d0VCtObXVCYWJRQWlFQTRSUTI0SVgyOUpsTUIwekkxL0xIQzA2M0N1bjVUZk9qYlgxd3pJTmVZRG89Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: https://k3s:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: default + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJREJrVWN1bk9yRjR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOalF6T1RrMU1qQXpNQjRYRFRJeU1ESXdOREUzTWpBd00xb1hEVEl6TURJdwpOREUzTWpBd00xb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJLQ2NyMnN4MU9leTV4WUwKVmhDd0ZBS3FOdVlGaGZqUlltOTY1cmRIU2lZQWdxaHV5ZXdVblBhcEtZYXRpS1l5TzRKRjMvUUtTWU9yVDMwLwo3dTdtbnF1alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVDk0ZjdHS1NhSkhNWXl6R1ZrZ2R5K0tDYkFhVEFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQTZJUm9sRWtRdjVITFkxUXRBTmt3ZnVDZzIvMTUyS1UwTGhXa004QVVMYk1DSVFDaWpzR1daRDNCN2x1agpXL3c4b0tqdVVpViszclpZL013ODllT285a1dvU1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFMk5ETTVPVFV5TURNd0hoY05Nakl3TWpBME1UY3lNREF6V2hjTk16SXdNakF5TVRjeU1EQXoKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFMk5ETTVPVFV5TURNd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUmx2S3hTVlhicnlXcWdUdDZnZ2k2Qyt4dTJjL1NzMDd5eGo1am9JT2paCjltR043SWFJZmJTVWtSYW10NEl1WDhQS0creXVQQ25KNjRQaktkVC9zT3VqbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVUvZUgreGlrbWlSekdNc3hsWklIYwp2aWdtd0drd0NnWUlLb1pJemowRUF3SURTQUF3UlFJaEFJdHNTMFY1OE5TNjh6ZnhnSVEwdHpwSW03UURZdlFYCkV2ejVBTU02bkMzNUFpQWRjK2R2bmdUQXhzWFdwRUhKNGQ0MCtIK0lSd290OCtkbThEbi9zN3JacVE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBOUk8xcWc1dk5WdER2c1YrV1lTMi81MHAzbnc0NXZ3UXJreVA0M0N6MkhvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFb0p5dmF6SFU1N0xuRmd0V0VMQVVBcW8yNWdXRitORmliM3JtdDBkS0pnQ0NxRzdKN0JTYwo5cWtwaHEySXBqSTdna1hmOUFwSmc2dFBmVC91N3VhZXF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/test.dockerfile b/test.dockerfile new file mode 100644 index 00000000000..e926ceb0a52 
--- /dev/null +++ b/test.dockerfile @@ -0,0 +1,21 @@ +# Go build +FROM golang:1.17 AS go-build +# Add a kubectl +RUN apt-get install -y apt-transport-https ca-certificates curl openssh-client && \ + curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg \ + https://packages.cloud.google.com/apt/doc/apt-key.gpg && \ + echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] \ + https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list && \ + apt-get update && \ + apt-get install -y kubectl + +RUN curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-arm64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; +RUN mv vcluster /usr/local/bin; + +RUN curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + +RUN mkdir /app +WORKDIR /app +COPY go.mod . +COPY go.sum . +RUN go mod download diff --git a/test/acceptance/vcluster/acceptance.go b/test/acceptance/vcluster/acceptance.go new file mode 100644 index 00000000000..a465248b861 --- /dev/null +++ b/test/acceptance/vcluster/acceptance.go @@ -0,0 +1,28 @@ +package vcluster + +import ( + "context" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/weaveworks/weave-gitops/test/vcluster" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/rand" +) + +var _ = Describe("Acceptance PoC", func() { + BeforeEach(func() { + clusterFactory, err := vcluster.NewFactory() + Expect(err).To(BeNil(), "creating new factory") + client, err := clusterFactory.Create(context.TODO(), "test-"+rand.String(10)) + Expect(err).To(BeNil(), "creating new cluster") + + namespaceObj := &corev1.Namespace{} + namespaceObj.Name = "test" + Expect(client.Create(context.TODO(), namespaceObj)).To(Succeed()) + }) + + It("Verify that gitops-flux can print out the version of flux", func() { + + }) +}) diff --git a/test/acceptance/vcluster/vcluster_suite_test.go b/test/acceptance/vcluster/vcluster_suite_test.go new file mode 100644 index 00000000000..cd112608cf4 --- /dev/null +++ b/test/acceptance/vcluster/vcluster_suite_test.go @@ -0,0 +1,19 @@ +package vcluster_test + +import ( + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/weaveworks/weave-gitops/test/vcluster" +) + +func TestVcluster(t *testing.T) { + if err := vcluster.InstallNginxIngressController(); err != nil { + t.Errorf("failed installing ingress controller: %w", err) + t.FailNow() + } + + RegisterFailHandler(Fail) + RunSpecs(t, "Vcluster Suite") +} diff --git a/test/vcluster/manifests/manifests.yaml.tpl b/test/vcluster/manifests/manifests.yaml.tpl new file mode 100644 index 00000000000..9c8ce6995f6 --- /dev/null +++ b/test/vcluster/manifests/manifests.yaml.tpl @@ -0,0 +1,214 @@ +--- +# Source: vcluster/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: vc-{{.Name}} + namespace: {{.Name}} + labels: + app: vcluster + chart: "vcluster-0.5.3" + release: "{{.Name}}" + heritage: "Helm" +--- +# Source: vcluster/templates/rbac/role.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{.Name}} + namespace: {{.Name}} + labels: + app: vcluster + chart: "vcluster-0.5.3" + release: "{{.Name}}" + heritage: "Helm" +rules: + - apiGroups: [""] + resources: ["configmaps", "secrets", "services", "pods", "pods/attach", "pods/portforward", "pods/exec", "endpoints", "persistentvolumeclaims"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + - apiGroups: [""] + resources: ["events", "pods/log"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["create", "delete", "patch", "update", "get", "list", "watch"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "deployments"] + verbs: ["get", "list", "watch"] +--- +# Source: vcluster/templates/rbac/rolebinding.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{.Name}} + namespace: {{.Name}} + labels: + app: vcluster + chart: "vcluster-0.5.3" + release: "{{.Name}}" + heritage: "Helm" +subjects: + - kind: ServiceAccount + name: vc-{{.Name}} + namespace: {{.Name}} +roleRef: + kind: Role + name: {{.Name}} + apiGroup: rbac.authorization.k8s.io +--- +# Source: vcluster/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: {{.Name}} + namespace: {{.Name}} + labels: + app: vcluster + chart: "vcluster-0.5.3" + release: "{{.Name}}" + heritage: "Helm" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + targetPort: 8443 + protocol: TCP + selector: + app: vcluster + release: {{.Name}} +--- +# Source: vcluster/templates/statefulset-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: {{.Name}}-headless + namespace: {{.Name}} + labels: + app: {{.Name}} + chart: "vcluster-0.5.3" + release: "{{.Name}}" + heritage: "Helm" +spec: + ports: + - name: https + port: 443 + targetPort: 8443 + protocol: TCP + clusterIP: None + selector: + app: vcluster + release: "{{.Name}}" +--- +# Source: vcluster/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{.Name}} + namespace: {{.Name}} + labels: + app: vcluster + chart: "vcluster-0.5.3" + release: "{{.Name}}" + heritage: "Helm" +spec: + serviceName: {{.Name}}-headless + replicas: 1 + selector: + matchLabels: + app: vcluster + release: {{.Name}} + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: [ "ReadWriteOnce" ] + storageClassName: + resources: + requests: + storage: 5Gi + template: + metadata: + labels: + app: vcluster + release: {{.Name}} + spec: + terminationGracePeriodSeconds: 10 + nodeSelector: + {} + affinity: + {} + tolerations: + [] + serviceAccountName: vc-{{.Name}} 
+ volumes: + containers: + - image: rancher/k3s:v1.21.4-k3s1 + name: vcluster + # k3s has a problem running as pid 1 and disabled agents on cgroupv2 + # nodes as it will try to evacuate the cgroups there. Starting k3s + # through a shell makes it non pid 1 and prevents this from happening + command: + - /bin/sh + args: + - -c + - /bin/k3s + server + --write-kubeconfig=/data/k3s-config/kube-config.yaml + --data-dir=/data + --disable=traefik,servicelb,metrics-server,local-storage,coredns + --disable-network-policy + --disable-agent + --disable-scheduler + --disable-cloud-controller + --flannel-backend=none + --kube-controller-manager-arg=controllers=*,-nodeipam,-nodelifecycle,-persistentvolume-binder,-attachdetach,-persistentvolume-expander,-cloud-node-lifecycle + --service-cidr=10.43.0.0/12 + && true + env: + [] + securityContext: + allowPrivilegeEscalation: false + volumeMounts: + - mountPath: /data + name: data + resources: + limits: + memory: 2Gi + requests: + cpu: 200m + memory: 256Mi + - name: syncer + image: "loftsh/vcluster:0.5.3" + args: + - --name={{.Name}} + livenessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + failureThreshold: 10 + initialDelaySeconds: 60 + periodSeconds: 2 + readinessProbe: + httpGet: + path: /readyz + port: 8443 + scheme: HTTPS + failureThreshold: 30 + periodSeconds: 2 + securityContext: + allowPrivilegeEscalation: false + env: + - name: DEFAULT_IMAGE_REGISTRY + value: + volumeMounts: + - mountPath: /data + name: data + readOnly: true + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 128Mi diff --git a/test/vcluster/manifests/nginx-ingress-deploy.yaml b/test/vcluster/manifests/nginx-ingress-deploy.yaml new file mode 100644 index 00000000000..437b37436af --- /dev/null +++ b/test/vcluster/manifests/nginx-ingress-deploy.yaml @@ -0,0 +1,685 @@ + +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + +--- +# Source: ingress-nginx/templates/controller-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +automountServiceAccountToken: true +--- +# Source: ingress-nginx/templates/controller-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: + allow-snippet-annotations: 'true' +--- +# Source: ingress-nginx/templates/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - '' + resources: + - nodes + verbs: + - get + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + 
- watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +--- +# Source: ingress-nginx/templates/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - '' + resources: + - namespaces + verbs: + - get + - apiGroups: + - '' + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - ingress-controller-leader + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create + - apiGroups: + - '' + resources: + - events + verbs: + - create + - patch +--- +# Source: ingress-nginx/templates/controller-rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/controller-service-webhook.yaml +apiVersion: v1 +kind: Service +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + type: ClusterIP + ports: + - name: https-webhook + port: 443 + targetPort: webhook + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-service.yaml +apiVersion: 
v1 +kind: Service +metadata: + annotations: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + type: LoadBalancer + externalTrafficPolicy: Local + ipFamilyPolicy: SingleStack + ipFamilies: + - IPv4 + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller +--- +# Source: ingress-nginx/templates/controller-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: k8s.gcr.io/ingress-nginx/controller:v1.1.1@sha256:0bc88eb15f9e7f84e8e56c14fa5735aaa488b840983f87bd79b1054190e660de + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --enable-ssl-passthrough + securityContext: + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + runAsUser: 101 + allowPrivilegeEscalation: true + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP + - name: webhook + containerPort: 8443 + protocol: TCP + volumeMounts: + - name: webhook-cert + mountPath: /usr/local/certificates/ + readOnly: true + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +--- +# Source: 
ingress-nginx/templates/controller-ingressclass.yaml +# We don't support namespaced ingressClass yet +# So a ClusterRole and a ClusterRoleBinding is required +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: controller + name: nginx + namespace: ingress-nginx +spec: + controller: k8s.io/ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml +# before changing this value, check the required kubernetes version +# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + name: ingress-nginx-admission +webhooks: + - name: validate.nginx.ingress.kubernetes.io + matchPolicy: Equivalent + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + failurePolicy: Fail + sideEffects: None + admissionReviewVersions: + - v1 + clientConfig: + service: + namespace: ingress-nginx + name: ingress-nginx-controller-admission + path: /networking/v1/ingresses +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ingress-nginx-admission + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ingress-nginx-admission + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + 
apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +rules: + - apiGroups: + - '' + resources: + - secrets + verbs: + - get + - create +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ingress-nginx-admission + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: ingress-nginx-admission-create + namespace: ingress-nginx + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: ingress-nginx-admission-create + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: create + image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660 + imagePullPolicy: IfNotPresent + args: + - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 2000 +--- +# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: ingress-nginx-admission-patch + namespace: ingress-nginx + annotations: + helm.sh/hook: post-install,post-upgrade + 
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook +spec: + template: + metadata: + name: ingress-nginx-admission-patch + labels: + helm.sh/chart: ingress-nginx-4.0.15 + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/version: 1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: admission-webhook + spec: + containers: + - name: patch + image: k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.1.1@sha256:64d8c73dca984af206adf9d6d7e46aa550362b1d7a01f3a0a91b20cc67868660 + imagePullPolicy: IfNotPresent + args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + restartPolicy: OnFailure + serviceAccountName: ingress-nginx-admission + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + runAsUser: 2000 diff --git a/test/vcluster/manifests/vcluster-ingress.yaml.tpl b/test/vcluster/manifests/vcluster-ingress.yaml.tpl new file mode 100644 index 00000000000..d07ac18cc04 --- /dev/null +++ b/test/vcluster/manifests/vcluster-ingress.yaml.tpl @@ -0,0 +1,22 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + name: {{.Name}} + namespace: {{.Name}} +spec: + rules: + - host: {{.Name}}.k3s + http: + paths: + - backend: + service: + name: {{.Name}} + port: + number: 443 + path: / + pathType: ImplementationSpecific diff --git a/test/vcluster/manifests/vcluster-values.yaml.tpl b/test/vcluster/manifests/vcluster-values.yaml.tpl new file mode 100644 index 00000000000..07cc88219ea --- /dev/null +++ b/test/vcluster/manifests/vcluster-values.yaml.tpl @@ -0,0 +1,5 @@ +serviceCIDR: 10.43.0.0/16 + +syncer: + extraArgs: + - --tls-san={{.Name}}.k3s diff --git a/test/vcluster/vcluster.go b/test/vcluster/vcluster.go new file mode 100644 index 00000000000..8455855d594 --- /dev/null +++ b/test/vcluster/vcluster.go @@ -0,0 +1,291 @@ +package vcluster + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "text/template" + + "github.com/weaveworks/weave-gitops/pkg/kube" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/config" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/clientcmd" + + _ "embed" +) + +//go:embed manifests/vcluster-values.yaml.tpl +var vclusterValues string + +//go:embed manifests/vcluster-ingress.yaml.tpl +var vclusterIngress string + +//go:embed manifests/nginx-ingress-deploy.yaml +var nginxIngressManifests string + +type Factory interface { + Create(ctx context.Context, name string) (client.Client, error) + Delete(ctx context.Context, name string) error +} + +type factory struct { + hostClient client.Client +} + +func NewFactory() (Factory, error) { + cfg, err := config.GetConfig() + if err != nil { + return nil, err + } + + c, err := client.New(cfg, 
client.Options{}) + if err != nil { + return nil, err + } + + return &factory{ + hostClient: c, + }, nil +} + +func (c *factory) Create(ctx context.Context, name string) (client.Client, error) { + namespaceObj := &corev1.Namespace{} + namespaceObj.Name = name + + if err := c.hostClient.Create(ctx, namespaceObj); err != nil { + if !strings.Contains(err.Error(), "already exists") { + return nil, fmt.Errorf("failed creating namespace %s: %w", name, err) + } + } + + if err := createCluster(name); err != nil { + return nil, fmt.Errorf("failed creating cluster: %w", err) + } + + configPath, err := connectCluster(name) + if err != nil { + return nil, fmt.Errorf("failed connecting cluster: %w", err) + } + + kubeClientConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: configPath}, &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { + return nil, fmt.Errorf("failed getting vcluster client config: %w", err) + } + + return client.New(kubeClientConfig, client.Options{ + Scheme: kube.CreateScheme(), + }) +} + +func (c *factory) Delete(ctx context.Context, name string) error { + return nil +} + +func createCluster(name string) error { + if err := createClusterIngress(name); err != nil { + return fmt.Errorf("failed creating cluster ingress: %w", err) + } + + if err := appendClusterToEtcHosts(name); err != nil { + return fmt.Errorf("failed appending cluster to /etc/hosts file: %w", err) + } + + filename, err := writeVclusterValuesToDisk(name) + if err != nil { + return err + } + + args := []string{ + "create", name, + "-n", name, + "-f", filename, + "--upgrade", + } + + output, err := exec.Command("vcluster", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("error executing vcluster %s: %s", strings.Join(args, " "), string(output)) + } + + return nil +} + +func connectCluster(name string) (string, error) { + vKubeconfigFile, err := ioutil.TempFile(os.TempDir(), "vcluster_e2e_kubeconfig_") + if err != nil { + return "", fmt.Errorf("could not create a temporary file: %v", err) + } + + args := []string{ + "connect", name, + "-n", name, + "--kube-config", vKubeconfigFile.Name(), + "--server", fmt.Sprintf("https://%s.k3s", name), + } + + output, err := exec.Command("vcluster", args...).CombinedOutput() + if err != nil { + return "", fmt.Errorf("error executing vcluster %s: %s", strings.Join(args, " "), string(output)) + } + + return vKubeconfigFile.Name(), nil +} + +func writeVclusterValuesToDisk(name string) (string, error) { + tmpFile, err := ioutil.TempFile(os.TempDir(), "vcluster-values-") + if err != nil { + return "", fmt.Errorf("Cannot create temporary file: %w", err) + } + + values, err := executeTemplate(vclusterValues, name) + if err != nil { + return "", err + } + + // Example writing to the file + if _, err = tmpFile.Write(values); err != nil { + return "", fmt.Errorf("Failed to write to temporary file: %w", err) + } + + // Close the file + if err := tmpFile.Close(); err != nil { + return "", err + } + + return tmpFile.Name(), nil +} + +func appendClusterToEtcHosts(name string) error { + clusterEntry := fmt.Sprintf("172.10.0.150 %s.k3s\n", name) + + present, err := checkClusterIsPresent(clusterEntry) + if err != nil { + return err + } + + if present { + return nil + } + + f, err := os.OpenFile("/etc/hosts", os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + + defer f.Close() + + if _, err = f.WriteString(clusterEntry); err != nil { + return err + } + + return nil +} + +func 
checkClusterIsPresent(entry string) (bool, error) { + b, err := ioutil.ReadFile("/etc/hosts") + if err != nil { + return false, err + } + + isExist, err := regexp.Match(entry, b) + if err != nil { + return false, err + } + + return isExist, nil +} + +func executeTemplate(tplData string, clusterName string) ([]byte, error) { + template, err := template.New(clusterName).Parse(tplData) + if err != nil { + return nil, fmt.Errorf("error parsing template %s: %w", clusterName, err) + } + + yaml := &bytes.Buffer{} + + err = template.Execute(yaml, map[string]string{ + "Name": clusterName, + }) + if err != nil { + return nil, fmt.Errorf("error injecting values to template: %w", err) + } + + return yaml.Bytes(), nil +} + +func createClusterIngress(name string) error { + tmpFile, err := ioutil.TempFile(os.TempDir(), "vcluster-ingress-") + if err != nil { + return fmt.Errorf("Cannot create temporary file: %w", err) + } + + // defer os.Remove(tmpFile.Name()) + + values, err := executeTemplate(vclusterIngress, name) + if err != nil { + return err + } + + // Example writing to the file + if _, err = tmpFile.Write(values); err != nil { + return fmt.Errorf("Failed to write to temporary file: %w", err) + } + + // Close the file + if err := tmpFile.Close(); err != nil { + return err + } + + args := []string{ + "apply", + "-f", tmpFile.Name(), + } + + output, err := exec.Command("kubectl", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("error applying ingress manifests with kubectl %s: %s", strings.Join(args, " "), string(output)) + } + + return nil +} + +func InstallNginxIngressController() error { + tmpFile, err := ioutil.TempFile(os.TempDir(), "vcluster-ingress-") + if err != nil { + return fmt.Errorf("Cannot create temporary file: %w", err) + } + + if _, err = tmpFile.Write([]byte(nginxIngressManifests)); err != nil { + return fmt.Errorf("Failed to write to temporary file: %w", err) + } + + if err := tmpFile.Close(); err != nil { + return err + } + + args := []string{ + "apply", + "-f", tmpFile.Name(), + } + + output, err := exec.Command("kubectl", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("error applying ingress manifests with kubectl %s: %s", strings.Join(args, " "), string(output)) + } + + waitCmd := `while [[ $(kubectl get pods -n ingress-nginx -l app.kubernetes.io/component=controller -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do echo "waiting for pod" && sleep 1; done` + + output, err = exec.Command("bash", "-c", waitCmd).CombinedOutput() + if err != nil { + return fmt.Errorf("error waiting ingress controller to be ready. 
output=%s error=%s", string(output), err) + } + + return nil +} From b3149a55210cab3f641f59211823f1897fa2dc6a Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Tue, 8 Feb 2022 15:32:47 -0300 Subject: [PATCH 02/17] Checking vcluster accessibility --- kubeconfig.yaml | 19 -------- .../vcluster/vcluster_suite_test.go | 10 +++++ test/vcluster/vcluster.go | 44 +++++++++++++++++-- 3 files changed, 50 insertions(+), 23 deletions(-) delete mode 100644 kubeconfig.yaml diff --git a/kubeconfig.yaml b/kubeconfig.yaml deleted file mode 100644 index b2721211377..00000000000 --- a/kubeconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyTkRNNU9UVXlNRE13SGhjTk1qSXdNakEwTVRjeU1EQXpXaGNOTXpJd01qQXlNVGN5TURBegpXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyTkRNNU9UVXlNRE13V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFTOWNuenVrdis0OUZwdDl4djQyQzVWNGZseFhjTFd1a1VOMUJiM0tKaE4KUEhGVWNxNktZdFJDZnNZQmkrSXlWalNqcnIzM05ZRWNEb2JtYlJtWW12Z0pvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVVdPNUxwVWpWSjJkM0cvaDFWVXdPCmNuR2ZsZTh3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQVBOYnBXWGl6U3pEeEMwWHE1WWtKVThBR3NvM1ZpWU0KL3d0VCtObXVCYWJRQWlFQTRSUTI0SVgyOUpsTUIwekkxL0xIQzA2M0N1bjVUZk9qYlgxd3pJTmVZRG89Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - server: https://k3s:6443 - name: default -contexts: -- context: - cluster: default - user: default - name: default -current-context: default -kind: Config -preferences: {} -users: -- name: default - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrakNDQVRlZ0F3SUJBZ0lJREJrVWN1bk9yRjR3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOalF6T1RrMU1qQXpNQjRYRFRJeU1ESXdOREUzTWpBd00xb1hEVEl6TURJdwpOREUzTWpBd00xb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJLQ2NyMnN4MU9leTV4WUwKVmhDd0ZBS3FOdVlGaGZqUlltOTY1cmRIU2lZQWdxaHV5ZXdVblBhcEtZYXRpS1l5TzRKRjMvUUtTWU9yVDMwLwo3dTdtbnF1alNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCVDk0ZjdHS1NhSkhNWXl6R1ZrZ2R5K0tDYkFhVEFLQmdncWhrak9QUVFEQWdOSkFEQkcKQWlFQTZJUm9sRWtRdjVITFkxUXRBTmt3ZnVDZzIvMTUyS1UwTGhXa004QVVMYk1DSVFDaWpzR1daRDNCN2x1agpXL3c4b0tqdVVpViszclpZL013ODllT285a1dvU1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZHpDQ0FSMmdBd0lCQWdJQkFEQUtCZ2dxaGtqT1BRUURBakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwClpXNTBMV05oUURFMk5ETTVPVFV5TURNd0hoY05Nakl3TWpBME1UY3lNREF6V2hjTk16SXdNakF5TVRjeU1EQXoKV2pBak1TRXdId1lEVlFRRERCaHJNM010WTJ4cFpXNTBMV05oUURFMk5ETTVPVFV5TURNd1dUQVRCZ2NxaGtqTwpQUUlCQmdncWhrak9QUU1CQndOQ0FBUmx2S3hTVlhicnlXcWdUdDZnZ2k2Qyt4dTJjL1NzMDd5eGo1am9JT2paCjltR043SWFJZmJTVWtSYW10NEl1WDhQS0creXVQQ25KNjRQaktkVC9zT3VqbzBJd1FEQU9CZ05WSFE4QkFmOEUKQkFNQ0FxUXdEd1lEVlIwVEFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVUvZUgreGlrbWlSekdNc3hsWklIYwp2aWdtd0drd0NnWUlLb1pJemowRUF3SURTQUF3UlFJaEFJdHNTMFY1OE5TNjh6ZnhnSVEwdHpwSW03UURZdlFYCkV2ejVBTU02bkMzNUFpQWRjK2R2bmdUQXhzWFdwRUhKNGQ0MCtIK0lSd290OCtkbThEbi9zN3JacVE9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - client-key-data: 
LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSVBOUk8xcWc1dk5WdER2c1YrV1lTMi81MHAzbnc0NXZ3UXJreVA0M0N6MkhvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFb0p5dmF6SFU1N0xuRmd0V0VMQVVBcW8yNWdXRitORmliM3JtdDBkS0pnQ0NxRzdKN0JTYwo5cWtwaHEySXBqSTdna1hmOUFwSmc2dFBmVC91N3VhZXF3PT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/test/acceptance/vcluster/vcluster_suite_test.go b/test/acceptance/vcluster/vcluster_suite_test.go index cd112608cf4..fbd3bc4fc33 100644 --- a/test/acceptance/vcluster/vcluster_suite_test.go +++ b/test/acceptance/vcluster/vcluster_suite_test.go @@ -9,6 +9,16 @@ import ( ) func TestVcluster(t *testing.T) { + if err := vcluster.UpdateHostKubeconfig(); err != nil { + t.Errorf("failed updating host kubeconfig: %w", err) + t.FailNow() + } + + if err := vcluster.WaitClusterConnectivity(); err != nil { + t.Errorf("failed waiting cluster to be ready: %w", err) + t.FailNow() + } + if err := vcluster.InstallNginxIngressController(); err != nil { t.Errorf("failed installing ingress controller: %w", err) t.FailNow() diff --git a/test/vcluster/vcluster.go b/test/vcluster/vcluster.go index 8455855d594..5f8b8d9f799 100644 --- a/test/vcluster/vcluster.go +++ b/test/vcluster/vcluster.go @@ -10,12 +10,14 @@ import ( "regexp" "strings" "text/template" + "time" "github.com/weaveworks/weave-gitops/pkg/kube" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/clientcmd" _ "embed" @@ -79,9 +81,21 @@ func (c *factory) Create(ctx context.Context, name string) (client.Client, error return nil, fmt.Errorf("failed getting vcluster client config: %w", err) } - return client.New(kubeClientConfig, client.Options{ - Scheme: kube.CreateScheme(), + kubeClientConfig.Timeout = 500 * time.Millisecond + + var vclusterClient client.Client + + err = wait.Poll(time.Second, 5*time.Second, func() (bool, error) { + fmt.Println("creating cluster client...") + vclusterClient, err = client.New(kubeClientConfig, client.Options{Scheme: kube.CreateScheme()}) + if err != nil { + return false, nil + } + + return true, nil }) + + return vclusterClient, err } func (c *factory) Delete(ctx context.Context, name string) error { @@ -226,8 +240,6 @@ func createClusterIngress(name string) error { return fmt.Errorf("Cannot create temporary file: %w", err) } - // defer os.Remove(tmpFile.Name()) - values, err := executeTemplate(vclusterIngress, name) if err != nil { return err @@ -289,3 +301,27 @@ func InstallNginxIngressController() error { return nil } + +func UpdateHostKubeconfig() error { + kubeconfig := os.Getenv("KUBECONFIG") + + input, err := ioutil.ReadFile(kubeconfig) + if err != nil { + return err + } + + output := bytes.Replace(input, []byte("127.0.0.1"), []byte("k3s"), -1) + + return ioutil.WriteFile(kubeconfig, output, 0666) +} + +func WaitClusterConnectivity() error { + waitCmd := `while ! kubectl version; do echo "waiting for cluster connectivity" && sleep 1; done` + + output, err := exec.Command("bash", "-c", waitCmd).CombinedOutput() + if err != nil { + return fmt.Errorf("error waiting cluster to be ready. 
output=%s error=%s", string(output), err) + } + + return nil +} From a68679e1f1e90462044588eb8530745ea0ca23eb Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 10:07:43 -0300 Subject: [PATCH 03/17] Deleting cluster --- .gitignore | 3 ++ test/acceptance/vcluster/acceptance.go | 17 ++++-- test/vcluster/vcluster.go | 73 +++++++++++++++----------- 3 files changed, 60 insertions(+), 33 deletions(-) diff --git a/.gitignore b/.gitignore index 8b181822baa..67fc03159e9 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,6 @@ ginkgo.report test/library/wego-library-test tilt_modules .envrc + +# generate by k3s when running acceptance tests +kubeconfig.yaml diff --git a/test/acceptance/vcluster/acceptance.go b/test/acceptance/vcluster/acceptance.go index a465248b861..a0e29bd4b60 100644 --- a/test/acceptance/vcluster/acceptance.go +++ b/test/acceptance/vcluster/acceptance.go @@ -11,10 +11,17 @@ import ( ) var _ = Describe("Acceptance PoC", func() { + var ( + clusterFactory vcluster.Factory + clusterName string + ) + BeforeEach(func() { - clusterFactory, err := vcluster.NewFactory() + var err error + clusterName = "test-" + rand.String(10) + clusterFactory, err = vcluster.NewFactory() Expect(err).To(BeNil(), "creating new factory") - client, err := clusterFactory.Create(context.TODO(), "test-"+rand.String(10)) + client, err := clusterFactory.Create(context.TODO(), clusterName) Expect(err).To(BeNil(), "creating new cluster") namespaceObj := &corev1.Namespace{} @@ -22,7 +29,11 @@ var _ = Describe("Acceptance PoC", func() { Expect(client.Create(context.TODO(), namespaceObj)).To(Succeed()) }) - It("Verify that gitops-flux can print out the version of flux", func() { + AfterEach(func() { + Expect(clusterFactory.Delete(context.TODO(), clusterName)).To(Succeed()) + }) + + It("Testing creation and deletion of a vcluster", func() { }) }) diff --git a/test/vcluster/vcluster.go b/test/vcluster/vcluster.go index 5f8b8d9f799..d1e08cfcfed 100644 --- a/test/vcluster/vcluster.go +++ b/test/vcluster/vcluster.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "os/exec" + "path/filepath" "regexp" "strings" "text/template" @@ -99,6 +100,41 @@ func (c *factory) Create(ctx context.Context, name string) (client.Client, error } func (c *factory) Delete(ctx context.Context, name string) error { + args := []string{ + "delete", name, + "--delete-namespace", + "-n", name, + } + + output, err := exec.Command("vcluster", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("error executing vcluster %s: %s", strings.Join(args, " "), string(output)) + } + + return nil +} + +func UpdateHostKubeconfig() error { + kubeconfig := os.Getenv("KUBECONFIG") + + input, err := ioutil.ReadFile(kubeconfig) + if err != nil { + return err + } + + output := bytes.Replace(input, []byte("127.0.0.1"), []byte("k3s"), -1) + + return ioutil.WriteFile(kubeconfig, output, 0666) +} + +func WaitClusterConnectivity() error { + waitCmd := `while ! kubectl version; do echo "waiting for cluster connectivity" && sleep 1; done` + + output, err := exec.Command("bash", "-c", waitCmd).CombinedOutput() + if err != nil { + return fmt.Errorf("error waiting cluster to be ready. 
output=%s error=%s", string(output), err) + } + return nil } @@ -132,15 +168,12 @@ func createCluster(name string) error { } func connectCluster(name string) (string, error) { - vKubeconfigFile, err := ioutil.TempFile(os.TempDir(), "vcluster_e2e_kubeconfig_") - if err != nil { - return "", fmt.Errorf("could not create a temporary file: %v", err) - } + vKubeconfigFile := vclusterKubeconfigFile(name) args := []string{ "connect", name, "-n", name, - "--kube-config", vKubeconfigFile.Name(), + "--kube-config", vKubeconfigFile, "--server", fmt.Sprintf("https://%s.k3s", name), } @@ -149,7 +182,11 @@ func connectCluster(name string) (string, error) { return "", fmt.Errorf("error executing vcluster %s: %s", strings.Join(args, " "), string(output)) } - return vKubeconfigFile.Name(), nil + return vKubeconfigFile, nil +} + +func vclusterKubeconfigFile(name string) string { + return filepath.Join(os.TempDir(), "vcluster-kubeconfig-"+name) } func writeVclusterValuesToDisk(name string) (string, error) { @@ -301,27 +338,3 @@ func InstallNginxIngressController() error { return nil } - -func UpdateHostKubeconfig() error { - kubeconfig := os.Getenv("KUBECONFIG") - - input, err := ioutil.ReadFile(kubeconfig) - if err != nil { - return err - } - - output := bytes.Replace(input, []byte("127.0.0.1"), []byte("k3s"), -1) - - return ioutil.WriteFile(kubeconfig, output, 0666) -} - -func WaitClusterConnectivity() error { - waitCmd := `while ! kubectl version; do echo "waiting for cluster connectivity" && sleep 1; done` - - output, err := exec.Command("bash", "-c", waitCmd).CombinedOutput() - if err != nil { - return fmt.Errorf("error waiting cluster to be ready. output=%s error=%s", string(output), err) - } - - return nil -} From ef412490f05dc0300b55d1cf69ed31d0e2747526 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 10:10:54 -0300 Subject: [PATCH 04/17] Adding acceptance gh action --- .github/workflows/acceptance.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .github/workflows/acceptance.yaml diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml new file mode 100644 index 00000000000..602e6a0c62d --- /dev/null +++ b/.github/workflows/acceptance.yaml @@ -0,0 +1,9 @@ +name: Acceptance Tests +on: push +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run acceptance + run: docker-compose run test From 6f897fc7591cdc72d7f5c4101cd0de22d0e4679c Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 11:29:31 -0300 Subject: [PATCH 05/17] Adding arch --- test.dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test.dockerfile b/test.dockerfile index e926ceb0a52..4a4509cbc8b 100644 --- a/test.dockerfile +++ b/test.dockerfile @@ -9,7 +9,8 @@ RUN apt-get install -y apt-transport-https ca-certificates curl openssh-client & apt-get update && \ apt-get install -y kubectl -RUN curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-arm64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; +RUN export ARCH=$(arch) +RUN curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-$ARCH)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; RUN mv vcluster /usr/local/bin; RUN curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash From 91100663e18549c7756672fd800fdd8bee4f0ffe Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 
9 Feb 2022 11:32:18 -0300 Subject: [PATCH 06/17] Using amd64 --- test.dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test.dockerfile b/test.dockerfile index 4a4509cbc8b..360466a9d7f 100644 --- a/test.dockerfile +++ b/test.dockerfile @@ -9,8 +9,7 @@ RUN apt-get install -y apt-transport-https ca-certificates curl openssh-client & apt-get update && \ apt-get install -y kubectl -RUN export ARCH=$(arch) -RUN curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-$ARCH)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; +RUN curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-amd64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; RUN mv vcluster /usr/local/bin; RUN curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash From 5dfbb299c3652dbb594dc38ebcfa2897f36ab812 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 12:14:24 -0300 Subject: [PATCH 07/17] Improving caching --- .github/workflows/acceptance.yaml | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index 602e6a0c62d..39f17736af7 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -5,5 +5,34 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 + - name: Set up Docker Buildx + id: buildx + uses: docker/setup-buildx-action@master + - name: Cache Docker layers + uses: actions/cache@v2 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-step1-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-buildx- + - name: Build and push images + uses: docker/build-push-action@v2 + with: + push: false + load: true + builder: ${{ steps.buildx.outputs.name }} + tags: weaveworks/weave-gitops-test-cache + file: ./test.dockerfile + context: . + cache-from: type=local,src=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache + # Temp fix + # https://github.com/docker/build-push-action/issues/252 + # https://github.com/moby/buildkit/issues/1896 + - name: Move cache + run: | + rm -rf /tmp/.buildx-cache + mv /tmp/.buildx-cache-new /tmp/.buildx-cache + - name: Run acceptance - run: docker-compose run test + run: COMPOSE_DOCKER_CLI_BUILD=1 docker-compose run test From ef33a685eca71f0e0bd2315ffa8f6cfd305414ed Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 12:40:47 -0300 Subject: [PATCH 08/17] changing cache dir --- .github/workflows/acceptance.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index 39f17736af7..a288290a6f5 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -25,7 +25,7 @@ jobs: file: ./test.dockerfile context: . 
cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache + cache-to: type=local,dest=/tmp/.buildx-cache-new # Temp fix # https://github.com/docker/build-push-action/issues/252 # https://github.com/moby/buildkit/issues/1896 From bbba8cce7b830870c7e0e42fce70a190fc3adfcf Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 12:50:25 -0300 Subject: [PATCH 09/17] Trigger build From 96c9a4f9c8bae44cd530368a4e338853f7199575 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 13:11:10 -0300 Subject: [PATCH 10/17] changing cache dir --- .github/workflows/acceptance.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index a288290a6f5..bfd4b95e5ab 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -7,7 +7,7 @@ jobs: - uses: actions/checkout@v2 - name: Set up Docker Buildx id: buildx - uses: docker/setup-buildx-action@master + uses: docker/setup-buildx-action@v1 - name: Cache Docker layers uses: actions/cache@v2 with: @@ -26,6 +26,10 @@ jobs: context: . cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache-new + + - name: Run acceptance + run: COMPOSE_DOCKER_CLI_BUILD=1 docker-compose run test + # Temp fix # https://github.com/docker/build-push-action/issues/252 # https://github.com/moby/buildkit/issues/1896 @@ -33,6 +37,3 @@ jobs: run: | rm -rf /tmp/.buildx-cache mv /tmp/.buildx-cache-new /tmp/.buildx-cache - - - name: Run acceptance - run: COMPOSE_DOCKER_CLI_BUILD=1 docker-compose run test From 19956ee7170a9b2fe9ef5a4ea71b4f3282499137 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 13:21:03 -0300 Subject: [PATCH 11/17] Trigger build From 7cf0864c666eef5811aaaaa37208e7d800fdf10b Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 13:52:31 -0300 Subject: [PATCH 12/17] Setting go caching --- .github/workflows/acceptance.yaml | 31 +++++-------------------------- test.dockerfile | 17 +++++------------ 2 files changed, 10 insertions(+), 38 deletions(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index bfd4b95e5ab..98f3e60291b 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -8,32 +8,11 @@ jobs: - name: Set up Docker Buildx id: buildx uses: docker/setup-buildx-action@v1 - - name: Cache Docker layers - uses: actions/cache@v2 + - uses: actions/cache@v2 with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-step1-${{ github.sha }} + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-buildx- - - name: Build and push images - uses: docker/build-push-action@v2 - with: - push: false - load: true - builder: ${{ steps.buildx.outputs.name }} - tags: weaveworks/weave-gitops-test-cache - file: ./test.dockerfile - context: . 
- cache-from: type=local,src=/tmp/.buildx-cache - cache-to: type=local,dest=/tmp/.buildx-cache-new - + ${{ runner.os }}-go- - name: Run acceptance - run: COMPOSE_DOCKER_CLI_BUILD=1 docker-compose run test - - # Temp fix - # https://github.com/docker/build-push-action/issues/252 - # https://github.com/moby/buildkit/issues/1896 - - name: Move cache - run: | - rm -rf /tmp/.buildx-cache - mv /tmp/.buildx-cache-new /tmp/.buildx-cache + run: docker-compose run -v $HOME/go/pkg/mod:/go/pkg/mod test diff --git a/test.dockerfile b/test.dockerfile index 360466a9d7f..d0be80063bc 100644 --- a/test.dockerfile +++ b/test.dockerfile @@ -1,18 +1,11 @@ # Go build FROM golang:1.17 AS go-build # Add a kubectl -RUN apt-get install -y apt-transport-https ca-certificates curl openssh-client && \ - curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg \ - https://packages.cloud.google.com/apt/doc/apt-key.gpg && \ - echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] \ - https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list && \ - apt-get update && \ - apt-get install -y kubectl - -RUN curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-amd64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; -RUN mv vcluster /usr/local/bin; - -RUN curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ + && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl \ + && curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-amd64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; \ + && mv vcluster /usr/local/bin; \ + && curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash RUN mkdir /app WORKDIR /app From c3aa9eb0a3f8f1a8090824b5d62bd141e0435111 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 13:58:22 -0300 Subject: [PATCH 13/17] Fixing runs --- test.dockerfile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test.dockerfile b/test.dockerfile index d0be80063bc..f8b081d73e1 100644 --- a/test.dockerfile +++ b/test.dockerfile @@ -3,9 +3,10 @@ FROM golang:1.17 AS go-build # Add a kubectl RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" \ && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl \ - && curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-amd64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster && chmod +x vcluster; \ - && mv vcluster /usr/local/bin; \ - && curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + && curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash \ + && curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" | sed -nE 's!.*"([^"]*vcluster-linux-amd64)".*!https://github.com\1!p' | xargs -n 1 curl -L -o vcluster \ + && chmod +x vcluster \ + && mv vcluster /usr/local/bin RUN mkdir /app WORKDIR /app From 9235741bb31e764c01b501d1085cbb7b282bf502 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 14:07:50 -0300 Subject: [PATCH 14/17] avoid adding modules in dockerfile --- docker-compose.yaml | 1 + test.dockerfile | 6 +++--- 2 files changed, 4 insertions(+), 
3 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index 5d9d6f68e7c..cbec5d6b000 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -41,6 +41,7 @@ services: - KUBECONFIG=/app/kubeconfig.yaml volumes: - .:/app + - ~/go/pkg/mod:/go/pkg/mod working_dir: /app depends_on: - k3s diff --git a/test.dockerfile b/test.dockerfile index f8b081d73e1..827fae9a30c 100644 --- a/test.dockerfile +++ b/test.dockerfile @@ -10,6 +10,6 @@ RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/s RUN mkdir /app WORKDIR /app -COPY go.mod . -COPY go.sum . -RUN go mod download +# COPY go.mod . +# COPY go.sum . +# RUN go mod download From 59b887c76d61761c3ba20e12d4576fff815f5df5 Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 14:14:59 -0300 Subject: [PATCH 15/17] Picking another key --- .github/workflows/acceptance.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index 98f3e60291b..0f24c360d17 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -11,8 +11,8 @@ jobs: - uses: actions/cache@v2 with: path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + key: ${{ runner.os }}-gomod-${{ hashFiles('**/go.sum') }} restore-keys: | - ${{ runner.os }}-go- + ${{ runner.os }}-gomod- - name: Run acceptance run: docker-compose run -v $HOME/go/pkg/mod:/go/pkg/mod test From 63fc2c8e9c085386213ce7924c58d3087589fe9d Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 14:28:40 -0300 Subject: [PATCH 16/17] avoid adding modules in dockerfile --- .github/workflows/acceptance.yaml | 2 ++ docker-compose.yaml | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index 0f24c360d17..c5528065e12 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -16,3 +16,5 @@ jobs: ${{ runner.os }}-gomod- - name: Run acceptance run: docker-compose run -v $HOME/go/pkg/mod:/go/pkg/mod test + - name: Give go modules permissions + run: chown -R $USER:$USER ~/go/pkg/mod diff --git a/docker-compose.yaml b/docker-compose.yaml index cbec5d6b000..ebbc65ec079 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -40,8 +40,8 @@ services: environment: - KUBECONFIG=/app/kubeconfig.yaml volumes: + - gomod:/go/pkg/mod - .:/app - - ~/go/pkg/mod:/go/pkg/mod working_dir: /app depends_on: - k3s @@ -49,6 +49,7 @@ services: - cluster volumes: k3s-server: {} + gomod: {} networks: cluster: From a60be179796c7c63588c5248ef6cd9f462c763ee Mon Sep 17 00:00:00 2001 From: Luiz Filho Date: Wed, 9 Feb 2022 14:33:23 -0300 Subject: [PATCH 17/17] Using sudo --- .github/workflows/acceptance.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml index c5528065e12..e5f20edc1ee 100644 --- a/.github/workflows/acceptance.yaml +++ b/.github/workflows/acceptance.yaml @@ -17,4 +17,4 @@ jobs: - name: Run acceptance run: docker-compose run -v $HOME/go/pkg/mod:/go/pkg/mod test - name: Give go modules permissions - run: chown -R $USER:$USER ~/go/pkg/mod + run: sudo chown -R $USER:$USER ~/go/pkg/mod
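
A note on the vcluster download in test.dockerfile: the dropped `RUN export ARCH=$(arch)` line never affected the download, because each Dockerfile RUN starts a fresh shell and the single-quoted sed expression would not have expanded `$ARCH` anyway, so the patch pins the asset name to amd64. A hedged sketch of the same lookup with the architecture resolved in a single shell invocation (assuming the GitHub releases/latest page still embeds direct asset links, as it did when these patches were written):

    ARCH=$(uname -m)
    case "$ARCH" in
      x86_64)  ARCH=amd64 ;;
      aarch64) ARCH=arm64 ;;
    esac
    # Double quotes let ${ARCH} expand inside the sed expression that scrapes the asset URL.
    curl -s -L "https://github.com/loft-sh/vcluster/releases/latest" \
      | sed -nE "s!.*\"([^\"]*vcluster-linux-${ARCH})\".*!https://github.com\1!p" \
      | xargs -n 1 curl -L -o vcluster
    chmod +x vcluster
    mv vcluster /usr/local/bin/vcluster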
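
On the Docker layer caching in patches 07-10: with `type=local`, buildx appends new blobs to an existing cache directory instead of pruning it, so exporting back into the restored path would let the cache grow on every run until it exceeds the actions/cache size limit. The workflow therefore exports to a fresh directory and swaps it over the restored one afterwards; the linked build-push-action and buildkit issues describe this workaround. A sketch of the equivalent CLI sequence:

    docker buildx build \
      --file test.dockerfile \
      --tag weaveworks/weave-gitops-test-cache \
      --cache-from "type=local,src=/tmp/.buildx-cache" \
      --cache-to "type=local,dest=/tmp/.buildx-cache-new" \
      --load \
      .
    rm -rf /tmp/.buildx-cache                     # drop the stale, ever-growing cache
    mv /tmp/.buildx-cache-new /tmp/.buildx-cache  # keep only the layers from this build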
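
Patches 12-17 replace the Docker layer cache with a Go module cache: actions/cache keeps ~/go/pkg/mod between runs and the acceptance step bind-mounts it into the test container. Because the container runs as root, anything `go test` downloads ends up root-owned on the runner, which is why the last two patches add the chown (and then sudo, since the runner user cannot change ownership of root-owned files). Roughly the same flow when run locally, assuming a default GOPATH under $HOME/go:

    docker-compose run -v "$HOME/go/pkg/mod:/go/pkg/mod" test
    # Restore ownership so the host user (and the cache post-step in CI) can read
    # what the root container wrote into the module cache.
    sudo chown -R "$USER:$USER" "$HOME/go/pkg/mod"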