From c4df74f9ca86e76281981bcddd4318dbd1c0afa3 Mon Sep 17 00:00:00 2001 From: Gabriel Mendes da Silva Date: Wed, 30 Mar 2022 18:33:42 -0700 Subject: [PATCH] Allow users to pass extra vsphere credentials This will allow users to provide different credentials for cloud provider and CSI driver --- pkg/providers/vsphere/config/template-cp.yaml | 8 +- ...ud_provder_and_csi_driver_credentials.yaml | 1166 +++++++++++++++++ ...ts_main_cp_cloud_provider_credentials.yaml | 1166 +++++++++++++++++ ...esults_main_cp_csi_driver_credentials.yaml | 1166 +++++++++++++++++ pkg/providers/vsphere/vsphere.go | 63 +- pkg/providers/vsphere/vsphere_test.go | 75 ++ 6 files changed, 3623 insertions(+), 21 deletions(-) create mode 100644 pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provder_and_csi_driver_credentials.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provider_credentials.yaml create mode 100644 pkg/providers/vsphere/testdata/expected_results_main_cp_csi_driver_credentials.yaml diff --git a/pkg/providers/vsphere/config/template-cp.yaml b/pkg/providers/vsphere/config/template-cp.yaml index 52d17fc78fe6..3586022f3b27 100644 --- a/pkg/providers/vsphere/config/template-cp.yaml +++ b/pkg/providers/vsphere/config/template-cp.yaml @@ -577,8 +577,8 @@ stringData: thumbprint = "{{.thumbprint}}" [VirtualCenter "{{.vsphereServer}}"] - user = "{{.eksaVsphereUsername}}" - password = "{{.eksaVspherePassword}}" + user = "{{.eksaCSIUsername}}" + password = "{{.eksaCSIPassword}}" datacenters = "{{.vsphereDatacenter}}" insecure-flag = "{{.insecure}}" @@ -1043,8 +1043,8 @@ stringData: name: cloud-provider-vsphere-credentials namespace: kube-system stringData: - {{.vsphereServer}}.password: "{{.eksaVspherePassword}}" - {{.vsphereServer}}.username: "{{.eksaVsphereUsername}}" + {{.vsphereServer}}.password: "{{.eksaCloudProviderPassword}}" + {{.vsphereServer}}.username: "{{.eksaCloudProviderUsername}}" type: Opaque type: addons.cluster.x-k8s.io/resource-set --- diff --git a/pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provder_and_csi_driver_credentials.yaml b/pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provder_and_csi_driver_credentials.yaml new file mode 100644 index 000000000000..64ec4c354999 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provder_and_csi_driver_credentials.yaml @@ -0,0 +1,1166 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 + kind: EtcdadmCluster + name: test-etcd +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + 
datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-control-plane-template-1234567890000 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/etc/kubernetes/pki/etcd/ca.crt" + certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt" + keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/plunder-app/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. 
+ - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + taints: [] + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 3 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-crs-0 + namespace: eksa-system +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: 
vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: cloud-config + cloudInitConfig: + version: 3.4.14 + installDir: "/usr/bin" + preEtcdadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-etcd-template-1234567890000 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-etcd-template-1234567890000 + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "vsphere_username" + password: "vsphere_password" +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + cluster-id = "default/test" + thumbprint = "ABCDEFG" + + [VirtualCenter "vsphere_server"] + 
user = "EksavSphereCSIUsername" + password = "EksavSphereCSIPassword" + datacenters = "SDDC-Datacenter" + insecure-flag = "false" + + [Network] + public-network = "/SDDC-Datacenter/network/sddc-cgw-network-1" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-19-4 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + 
value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v3.1.0-eks-1-19-4 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} 
+ volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v2.1.1-eks-1-19-4 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: v1 + data: + csi-migration: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "EksavSphereCPPassword" + vsphere_server.username: "EksavSphereCPUsername" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: 
ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provider_credentials.yaml b/pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provider_credentials.yaml new file mode 100644 index 000000000000..6df7b5aa2950 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_main_cp_cloud_provider_credentials.yaml @@ -0,0 +1,1166 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 + kind: EtcdadmCluster + name: test-etcd +--- +apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-control-plane-template-1234567890000 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/etc/kubernetes/pki/etcd/ca.crt" + certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt" + keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/plunder-app/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig 
+ status: {} + owner: root:root + path: /etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + taints: [] + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 3 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-crs-0 + namespace: eksa-system +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: 
vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: cloud-config + cloudInitConfig: + version: 3.4.14 + installDir: "/usr/bin" + preEtcdadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-etcd-template-1234567890000 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-etcd-template-1234567890000 + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "vsphere_username" + password: "vsphere_password" +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + cluster-id = "default/test" + thumbprint = "ABCDEFG" + + [VirtualCenter "vsphere_server"] + 
user = "vsphere_username" + password = "vsphere_password" + datacenters = "SDDC-Datacenter" + insecure-flag = "false" + + [Network] + public-network = "/SDDC-Datacenter/network/sddc-cgw-network-1" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-19-4 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + 
- name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v3.1.0-eks-1-19-4 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} + volumeMounts: + 
- mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v2.1.1-eks-1-19-4 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: v1 + data: + csi-migration: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "EksavSphereCPPassword" + vsphere_server.username: "EksavSphereCPUsername" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: ServiceAccount + 
name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/testdata/expected_results_main_cp_csi_driver_credentials.yaml b/pkg/providers/vsphere/testdata/expected_results_main_cp_csi_driver_credentials.yaml new file mode 100644 index 000000000000..8fc3ab1a6561 --- /dev/null +++ b/pkg/providers/vsphere/testdata/expected_results_main_cp_csi_driver_credentials.yaml @@ -0,0 +1,1166 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test + namespace: eksa-system +spec: + clusterNetwork: + pods: + cidrBlocks: [192.168.0.0/16] + services: + cidrBlocks: [10.96.0.0/12] + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlane + name: test + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereCluster + name: test + managedExternalEtcdRef: + apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 + kind: EtcdadmCluster + name: test-etcd +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: 
VSphereCluster +metadata: + name: test + namespace: eksa-system +spec: + controlPlaneEndpoint: + host: 1.2.3.4 + port: 6443 + identityRef: + kind: Secret + name: test-vsphere-credentials + server: vsphere_server + thumbprint: 'ABCDEFG' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-control-plane-template-1234567890000 + namespace: eksa-system +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 2 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlane +metadata: + name: test + namespace: eksa-system +spec: + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-control-plane-template-1234567890000 + kubeadmConfigSpec: + clusterConfiguration: + imageRepository: public.ecr.aws/eks-distro/kubernetes + etcd: + external: + endpoints: [] + caFile: "/etc/kubernetes/pki/etcd/ca.crt" + certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt" + keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key" + dns: + imageRepository: public.ecr.aws/eks-distro/coredns + imageTag: v1.8.0-eks-1-19-4 + apiServer: + extraArgs: + cloud-provider: external + audit-policy-file: /etc/kubernetes/audit-policy.yaml + audit-log-path: /var/log/kubernetes/api-audit.log + audit-log-maxage: "30" + audit-log-maxbackup: "10" + audit-log-maxsize: "512" + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + extraVolumes: + - hostPath: /etc/kubernetes/audit-policy.yaml + mountPath: /etc/kubernetes/audit-policy.yaml + name: audit-policy + pathType: File + readOnly: true + - hostPath: /var/log/kubernetes + mountPath: /var/log/kubernetes + name: audit-log-dir + pathType: DirectoryOrCreate + readOnly: false + controllerManager: + extraArgs: + cloud-provider: external + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + scheduler: + extraArgs: + profiling: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + files: + - content: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: 1.2.3.4 + image: public.ecr.aws/l0g8r8j6/plunder-app/kube-vip:v0.3.2-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + status: {} + owner: root:root + path: 
/etc/kubernetes/manifests/kube-vip.yaml + - content: | + apiVersion: audit.k8s.io/v1beta1 + kind: Policy + rules: + # Log aws-auth configmap changes + - level: RequestResponse + namespaces: ["kube-system"] + verbs: ["update", "patch", "delete"] + resources: + - group: "" # core + resources: ["configmaps"] + resourceNames: ["aws-auth"] + omitStages: + - "RequestReceived" + # The following requests were manually identified as high-volume and low-risk, + # so drop them. + - level: None + users: ["system:kube-proxy"] + verbs: ["watch"] + resources: + - group: "" # core + resources: ["endpoints", "services", "services/status"] + - level: None + users: ["kubelet"] # legacy kubelet identity + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + userGroups: ["system:nodes"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["nodes", "nodes/status"] + - level: None + users: + - system:kube-controller-manager + - system:kube-scheduler + - system:serviceaccount:kube-system:endpoint-controller + verbs: ["get", "update"] + namespaces: ["kube-system"] + resources: + - group: "" # core + resources: ["endpoints"] + - level: None + users: ["system:apiserver"] + verbs: ["get"] + resources: + - group: "" # core + resources: ["namespaces", "namespaces/status", "namespaces/finalize"] + # Don't log HPA fetching metrics. + - level: None + users: + - system:kube-controller-manager + verbs: ["get", "list"] + resources: + - group: "metrics.k8s.io" + # Don't log these read-only URLs. + - level: None + nonResourceURLs: + - /healthz* + - /version + - /swagger* + # Don't log events requests. + - level: None + resources: + - group: "" # core + resources: ["events"] + # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes + - level: Request + users: ["kubelet", "system:node-problem-detector", "system:serviceaccount:kube-system:node-problem-detector"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + - level: Request + userGroups: ["system:nodes"] + verbs: ["update","patch"] + resources: + - group: "" # core + resources: ["nodes/status", "pods/status"] + omitStages: + - "RequestReceived" + # deletecollection calls can be large, don't log responses for expected namespace deletions + - level: Request + users: ["system:serviceaccount:kube-system:namespace-controller"] + verbs: ["deletecollection"] + omitStages: + - "RequestReceived" + # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data, + # so only log at the Metadata level. + - level: Metadata + resources: + - group: "" # core + resources: ["secrets", "configmaps"] + - group: authentication.k8s.io + resources: ["tokenreviews"] + omitStages: + - "RequestReceived" + - level: Request + resources: + - group: "" + resources: ["serviceaccounts/token"] + # Get repsonses can be large; skip them. 
+ - level: Request + verbs: ["get", "list", "watch"] + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for known APIs + - level: RequestResponse + resources: + - group: "" # core + - group: "admissionregistration.k8s.io" + - group: "apiextensions.k8s.io" + - group: "apiregistration.k8s.io" + - group: "apps" + - group: "authentication.k8s.io" + - group: "authorization.k8s.io" + - group: "autoscaling" + - group: "batch" + - group: "certificates.k8s.io" + - group: "extensions" + - group: "metrics.k8s.io" + - group: "networking.k8s.io" + - group: "policy" + - group: "rbac.authorization.k8s.io" + - group: "scheduling.k8s.io" + - group: "settings.k8s.io" + - group: "storage.k8s.io" + omitStages: + - "RequestReceived" + # Default level for all other requests. + - level: Metadata + omitStages: + - "RequestReceived" + owner: root:root + path: /etc/kubernetes/audit-policy.yaml + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + taints: [] + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + read-only-port: "0" + anonymous-auth: "false" + tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + name: '{{ ds.meta_data.hostname }}' + taints: [] + preKubeadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + useExperimentalRetryJoin: true + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + format: cloud-config + replicas: 3 + version: v1.19.8-eks-1-19-4 +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: test + name: test-crs-0 + namespace: eksa-system +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: test + resources: + - kind: Secret + name: vsphere-csi-controller + - kind: ConfigMap + name: 
vsphere-csi-controller-role + - kind: ConfigMap + name: vsphere-csi-controller-binding + - kind: Secret + name: csi-vsphere-config + - kind: ConfigMap + name: csi.vsphere.vmware.com + - kind: ConfigMap + name: vsphere-csi-node + - kind: ConfigMap + name: vsphere-csi-controller + - kind: Secret + name: cloud-controller-manager + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +kind: EtcdadmCluster +apiVersion: etcdcluster.cluster.x-k8s.io/v1beta1 +metadata: + name: test-etcd + namespace: eksa-system +spec: + replicas: 3 + etcdadmConfigSpec: + etcdadmBuiltin: true + format: cloud-config + cloudInitConfig: + version: 3.4.14 + installDir: "/usr/bin" + preEtcdadmCommands: + - hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts + - echo "127.0.0.1 localhost" >>/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >>/etc/hosts + - echo "{{ ds.meta_data.hostname }}" >/etc/hostname + cipherSuites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + users: + - name: capv + sshAuthorizedKeys: + - 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ==' + sudo: ALL=(ALL) NOPASSWD:ALL + infrastructureTemplate: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: test-etcd-template-1234567890000 +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: test-etcd-template-1234567890000 + namespace: 'eksa-system' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: SDDC-Datacenter + datastore: /SDDC-Datacenter/datastore/WorkloadDatastore + diskGiB: 25 + folder: '/SDDC-Datacenter/vm' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: /SDDC-Datacenter/network/sddc-cgw-network-1 + numCPUs: 3 + resourcePool: '*/Resources' + server: vsphere_server + storagePolicyName: "vSAN Default Storage Policy" + template: /SDDC-Datacenter/vm/Templates/ubuntu-1804-kube-v1.19.6 + thumbprint: 'ABCDEFG' +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-vsphere-credentials + namespace: eksa-system + labels: + clusterctl.cluster.x-k8s.io/move: "true" +stringData: + username: "vsphere_username" + password: "vsphere_password" +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-csi-controller + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: csi-vsphere-config + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: csi-vsphere-config + namespace: kube-system + stringData: + csi-vsphere.conf: |+ + [Global] + cluster-id = "default/test" + thumbprint = "ABCDEFG" + + [VirtualCenter "vsphere_server"] + 
user = "EksavSphereCSIUsername" + password = "EksavSphereCSIPassword" + datacenters = "SDDC-Datacenter" + insecure-flag = "false" + + [Network] + public-network = "/SDDC-Datacenter/network/sddc-cgw-network-1" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - storage.k8s.io + resources: + - csidrivers + verbs: + - create + - delete + - apiGroups: + - "" + resources: + - nodes + - pods + - secrets + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - create + - delete + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - update + - patch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - get + - list +kind: ConfigMap +metadata: + name: vsphere-csi-controller-role + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: kube-system +kind: ConfigMap +metadata: + name: vsphere-csi-controller-binding + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true +kind: ConfigMap +metadata: + name: csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: kube-system + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/node-driver-registrar:v2.1.0-eks-1-19-4 + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock + name: node-driver-registrar + resources: {} + securityContext: + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + 
value: "false" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - args: + - --csi-address=/csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: Default + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: vsphere-csi-node + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: kube-system + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-attacher:v3.1.0-eks-1-19-4 + name: csi-attacher + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + - env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: X_CSI_MODE + value: controller + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: INFO + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 3 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/livenessprobe:v2.2.0-eks-1-19-4 + name: liveness-probe + resources: {} 
+ volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --leader-election + env: + - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-7c2690c880c6521afdd9ffa8d90443a11c6b817b + name: vsphere-syncer + resources: {} + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: public.ecr.aws/eks-distro/kubernetes-csi/external-provisioner:v2.1.1-eks-1-19-4 + name: csi-provisioner + resources: {} + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: csi-vsphere-config + - emptyDir: {} + name: socket-dir +kind: ConfigMap +metadata: + name: vsphere-csi-controller + namespace: eksa-system +--- +apiVersion: v1 +data: + data: | + apiVersion: v1 + data: + csi-migration: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: kube-system +kind: ConfigMap +metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: eksa-system +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-controller-manager + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + namespace: kube-system +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: eksa-system +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + vsphere_server.password: "vsphere_password" + vsphere_server.username: "vsphere_username" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: system:cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - '*' + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: system:cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:cloud-controller-manager + subjects: + - kind: 
ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: "ABCDEFG" + insecureFlag: false + vcenter: + vsphere_server: + datacenters: + - 'SDDC-Datacenter' + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + server: 'vsphere_server' + thumbprint: 'ABCDEFG' + kind: ConfigMap + metadata: + name: vsphere-cloud-config + namespace: kube-system + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + kind: Service + metadata: + labels: + component: cloud-controller-manager + name: cloud-controller-manager + namespace: kube-system + spec: + ports: + - port: 443 + protocol: TCP + targetPort: 43001 + selector: + component: cloud-controller-manager + type: NodePort + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + name: vsphere-cloud-controller-manager + namespace: kube-system + spec: + selector: + matchLabels: + k8s-app: vsphere-cloud-controller-manager + template: + metadata: + labels: + k8s-app: vsphere-cloud-controller-manager + spec: + containers: + - args: + - --v=2 + - --cloud-provider=vsphere + - --cloud-config=/etc/cloud/vsphere.conf + image: public.ecr.aws/l0g8r8j6/kubernetes/cloud-provider-vsphere/cpi/manager:v1.18.1-2093eaeda5a4567f0e516d652e0b25b1d7abc774 + name: vsphere-cloud-controller-manager + resources: + requests: + cpu: 200m + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + hostNetwork: true + serviceAccountName: cloud-controller-manager + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node.kubernetes.io/not-ready + volumes: + - configMap: + name: vsphere-cloud-config + name: vsphere-config-volume + updateStrategy: + type: RollingUpdate +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: eksa-system diff --git a/pkg/providers/vsphere/vsphere.go b/pkg/providers/vsphere/vsphere.go index d5af0e25cac4..d2182e6e0a34 100644 --- a/pkg/providers/vsphere/vsphere.go +++ b/pkg/providers/vsphere/vsphere.go @@ -36,21 +36,27 @@ import ( ) const ( - CredentialsObjectName = "vsphere-credentials" - EksavSphereUsernameKey = "EKSA_VSPHERE_USERNAME" - EksavSpherePasswordKey = "EKSA_VSPHERE_PASSWORD" - eksaLicense = "EKSA_LICENSE" - vSphereUsernameKey = "VSPHERE_USERNAME" - vSpherePasswordKey = "VSPHERE_PASSWORD" - vSphereServerKey = "VSPHERE_SERVER" - govcInsecure = "GOVC_INSECURE" - expClusterResourceSetKey = "EXP_CLUSTER_RESOURCE_SET" - defaultTemplateLibrary = "eks-a-templates" - defaultTemplatesFolder = "vm/Templates" - bottlerocketDefaultUser = "ec2-user" - ubuntuDefaultUser = "capv" - maxRetries = 30 - backOffPeriod = 5 * time.Second + CredentialsObjectName = "vsphere-credentials" + EksavSphereUsernameKey = "EKSA_VSPHERE_USERNAME" + EksavSpherePasswordKey = "EKSA_VSPHERE_PASSWORD" + // Username and password 
for the cloud provider
+	EksavSphereCPUsernameKey = "EKSA_VSPHERE_CP_USERNAME"
+	EksavSphereCPPasswordKey = "EKSA_VSPHERE_CP_PASSWORD"
+	// Username and password for the CSI driver
+	EksavSphereCSIUsernameKey = "EKSA_VSPHERE_CSI_USERNAME"
+	EksavSphereCSIPasswordKey = "EKSA_VSPHERE_CSI_PASSWORD"
+	eksaLicense              = "EKSA_LICENSE"
+	vSphereUsernameKey       = "VSPHERE_USERNAME"
+	vSpherePasswordKey       = "VSPHERE_PASSWORD"
+	vSphereServerKey         = "VSPHERE_SERVER"
+	govcInsecure             = "GOVC_INSECURE"
+	expClusterResourceSetKey = "EXP_CLUSTER_RESOURCE_SET"
+	defaultTemplateLibrary   = "eks-a-templates"
+	defaultTemplatesFolder   = "vm/Templates"
+	bottlerocketDefaultUser  = "ec2-user"
+	ubuntuDefaultUser        = "capv"
+	maxRetries               = 30
+	backOffPeriod            = 5 * time.Second
 )
 
 //go:embed config/template-cp.yaml
@@ -688,6 +694,25 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec, datacenterSpec v1alpha1.VSphe
 		Append(clusterapi.PodIAMAuthExtraArgs(clusterSpec.Cluster.Spec.PodIAMConfig)).
 		Append(sharedExtraArgs)
 
+	eksaVsphereUsername := os.Getenv(EksavSphereUsernameKey)
+	eksaVspherePassword := os.Getenv(EksavSpherePasswordKey)
+
+	// Cloud provider credentials; default to the generic vSphere credentials when unset
+	eksaCPUsername := os.Getenv(EksavSphereCPUsernameKey)
+	eksaCPPassword := os.Getenv(EksavSphereCPPasswordKey)
+
+	if eksaCPUsername == "" {
+		eksaCPUsername = eksaVsphereUsername
+		eksaCPPassword = eksaVspherePassword
+	}
+	// CSI driver credentials, with the same fallback
+	eksaCSIUsername := os.Getenv(EksavSphereCSIUsernameKey)
+	eksaCSIPassword := os.Getenv(EksavSphereCSIPasswordKey)
+	if eksaCSIUsername == "" {
+		eksaCSIUsername = eksaVsphereUsername
+		eksaCSIPassword = eksaVspherePassword
+	}
+
 	values := map[string]interface{}{
 		"clusterName":            clusterSpec.Cluster.Name,
 		"controlPlaneEndpointIp": clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host,
@@ -734,8 +759,12 @@ func buildTemplateMapCP(clusterSpec *cluster.Spec, datacenterSpec v1alpha1.VSphe
 		"eksaSystemNamespace": constants.EksaSystemNamespace,
 		"auditPolicy":         common.GetAuditPolicy(),
 		"resourceSetName":     resourceSetName(clusterSpec),
-		"eksaVsphereUsername": os.Getenv(EksavSphereUsernameKey),
-		"eksaVspherePassword": os.Getenv(EksavSpherePasswordKey),
+		"eksaVsphereUsername":       eksaVsphereUsername,
+		"eksaVspherePassword":       eksaVspherePassword,
+		"eksaCloudProviderUsername": eksaCPUsername,
+		"eksaCloudProviderPassword": eksaCPPassword,
+		"eksaCSIUsername":           eksaCSIUsername,
+		"eksaCSIPassword":           eksaCSIPassword,
 	}
 
 	if clusterSpec.Cluster.Spec.RegistryMirrorConfiguration != nil {
diff --git a/pkg/providers/vsphere/vsphere_test.go b/pkg/providers/vsphere/vsphere_test.go
index 87c05e42c1f7..db154778cef3 100644
--- a/pkg/providers/vsphere/vsphere_test.go
+++ b/pkg/providers/vsphere/vsphere_test.go
@@ -3128,3 +3128,78 @@ func TestClusterSpecChangedMachineConfigsChanged(t *testing.T) {
 		t.Fatalf("expected spec change but none was detected")
 	}
 }
+
+func TestProviderGenerateCAPISpecForCreateMultipleCredentials(t *testing.T) {
+	tests := []struct {
+		testName   string
+		wantCPFile string
+		envMap     map[string]string
+	}{
+		{
+			testName:   "specify cloud provider credentials",
+			wantCPFile: "testdata/expected_results_main_cp_cloud_provider_credentials.yaml",
+			envMap:     map[string]string{EksavSphereCPUsernameKey: "EksavSphereCPUsername", EksavSphereCPPasswordKey: "EksavSphereCPPassword"},
+		},
+		{
+			testName:   "specify CSI credentials",
+			wantCPFile: "testdata/expected_results_main_cp_csi_driver_credentials.yaml",
+			envMap:     map[string]string{EksavSphereCSIUsernameKey: "EksavSphereCSIUsername", EksavSphereCSIPasswordKey: "EksavSphereCSIPassword"},
+		},
+		{
+ testName: "specify cloud provider and CSI credentials", + wantCPFile: "testdata/expected_results_main_cp_cloud_provder_and_csi_driver_credentials.yaml", + envMap: map[string]string{ + EksavSphereCSIUsernameKey: "EksavSphereCSIUsername", + EksavSphereCSIPasswordKey: "EksavSphereCSIPassword", + EksavSphereCPUsernameKey: "EksavSphereCPUsername", + EksavSphereCPPasswordKey: "EksavSphereCPPassword", + }, + }, + } + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + mockCtrl := gomock.NewController(t) + var tctx testContext + tctx.SaveContext() + defer tctx.RestoreContext() + + previousValues := map[string]string{} + for k, v := range tt.envMap { + previousValues[k] = os.Getenv(k) + if err := os.Setenv(k, v); err != nil { + t.Fatalf(err.Error()) + } + } + + ctx := context.Background() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{ + Name: "test", + } + clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename) + + datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename) + machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl) + if provider == nil { + t.Fatalf("provider object is nil") + } + + err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) + if err != nil { + t.Fatalf("failed to setup and validate: %v", err) + } + + cp, _, err := provider.GenerateCAPISpecForCreate(context.Background(), cluster, clusterSpec) + if err != nil { + t.Fatalf("failed to generate cluster api spec contents: %v", err) + } + test.AssertContentToFile(t, string(cp), tt.wantCPFile) + for k, v := range previousValues { + if err := os.Setenv(k, v); err != nil { + t.Fatalf(err.Error()) + } + } + }) + } +}