From 2ac17bee69df37badcb8b177a514a0dfeb61e6f3 Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sat, 24 Oct 2020 22:50:26 -0700 Subject: [PATCH 1/2] Remove code for no-longer-supported k8s releases --- k8s/crds/kops.k8s.io_clusters.yaml | 2 +- nodeup/pkg/model/BUILD.bazel | 1 - nodeup/pkg/model/kube_apiserver.go | 11 +- nodeup/pkg/model/kube_apiserver_test.go | 40 - nodeup/pkg/model/kube_scheduler.go | 15 +- nodeup/pkg/model/networking/calico.go | 21 - .../auditDynamicConfiguration/cluster.yaml | 63 - pkg/apis/kops/cluster.go | 4 +- pkg/apis/kops/v1alpha2/cluster.go | 4 +- pkg/apis/kops/validation/openstack.go | 3 - pkg/apis/kops/validation/validation.go | 23 +- pkg/apis/kops/validation/validation_test.go | 22 +- pkg/model/components/apiserver.go | 20 +- pkg/model/components/cilium.go | 6 +- pkg/model/components/docker.go | 4 +- pkg/model/components/etcd.go | 27 +- pkg/model/components/etcdmanager/options.go | 2 +- pkg/model/components/openstack.go | 6 +- pkg/model/context.go | 2 +- pkg/model/firewall.go | 6 - pkg/model/iam/iam_builder.go | 18 - pkg/model/pki.go | 11 - upup/models/bindata.go | 13031 +++++----------- .../authentication.aws/k8s-1.10.yaml.template | 69 - .../authentication.kope.io/k8s-1.8.yaml | 185 - .../core.addons.k8s.io/k8s-1.7.yaml.template | 153 - .../k8s-1.6.yaml.template | 197 - .../k8s-1.6.yaml.template | 116 - .../k8s-1.6.yaml.template | 92 - .../k8s-1.6.yaml.template | 311 - .../k8s-1.10.yaml.template | 132 - .../k8s-1.7.yaml.template | 692 - .../networking.flannel/k8s-1.6.yaml.template | 163 - .../addons/networking.kope.io/k8s-1.6.yaml | 104 - .../k8s-1.6.yaml.template | 160 - .../k8s-1.9.yaml.template | 543 - .../k8s-1.7-v3.yaml.template | 749 - .../k8s-1.7.yaml.template | 550 - .../networking.weave/k8s-1.9.yaml.template | 284 - .../k8s-1.10.yaml.template | 188 - .../k8s-1.10.yaml.template | 82 - .../pkg/fi/cloudup/bootstrapchannelbuilder.go | 488 +- upup/pkg/fi/cloudup/populatecluster_test.go | 6 +- 43 files changed, 4086 insertions(+), 14520 deletions(-) delete mode 100644 nodeup/pkg/model/tests/apiServer/auditDynamicConfiguration/cluster.yaml delete mode 100644 upup/models/cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml delete mode 100644 upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml delete mode 100644 upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template delete mode 100644 
upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template delete mode 100644 upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template diff --git a/k8s/crds/kops.k8s.io_clusters.yaml b/k8s/crds/kops.k8s.io_clusters.yaml index d8d0b6d371036..0023c749d6396 100644 --- a/k8s/crds/kops.k8s.io_clusters.yaml +++ b/k8s/crds/kops.k8s.io_clusters.yaml @@ -631,7 +631,7 @@ spec: description: Name is the name of the etcd cluster (main, events etc) type: string provider: - description: 'Provider is the provider used to run etcd: standalone, manager. We default to manager for kubernetes 1.11 or if the manager is configured; otherwise standalone.' + description: 'Provider is the provider used to run etcd: Manager, Legacy. Defaults to Manager.' type: string version: description: Version is the version of etcd to run i.e. 2.1.2, 3.0.17 etcd diff --git a/nodeup/pkg/model/BUILD.bazel b/nodeup/pkg/model/BUILD.bazel index 350dd322376a6..2305073fe1932 100644 --- a/nodeup/pkg/model/BUILD.bazel +++ b/nodeup/pkg/model/BUILD.bazel @@ -112,7 +112,6 @@ go_test( "//pkg/testutils:go_default_library", "//upup/pkg/fi:go_default_library", "//upup/pkg/fi/cloudup:go_default_library", - "//upup/pkg/fi/nodeup/nodetasks:go_default_library", "//util/pkg/architectures:go_default_library", "//util/pkg/distributions:go_default_library", "//util/pkg/exec:go_default_library", diff --git a/nodeup/pkg/model/kube_apiserver.go b/nodeup/pkg/model/kube_apiserver.go index 0fcbfa5c398fb..c3329033883a0 100644 --- a/nodeup/pkg/model/kube_apiserver.go +++ b/nodeup/pkg/model/kube_apiserver.go @@ -64,11 +64,7 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error { if *b.Cluster.Spec.EncryptionConfig { encryptionConfigPath := fi.String(filepath.Join(b.PathSrvKubernetes(), "encryptionconfig.yaml")) - if b.IsKubernetesGTE("1.13") { - b.Cluster.Spec.KubeAPIServer.EncryptionProviderConfig = encryptionConfigPath - } else { - b.Cluster.Spec.KubeAPIServer.ExperimentalEncryptionProviderConfig = encryptionConfigPath - } + b.Cluster.Spec.KubeAPIServer.EncryptionProviderConfig = encryptionConfigPath key := "encryptionconfig" encryptioncfg, err := b.SecretStore.Secret(key) @@ -375,11 +371,6 @@ func (b *KubeAPIServerBuilder) buildPod() (*v1.Pod, error) { } } - //remove elements from the spec that are not enabled yet - if b.Cluster.Spec.KubeAPIServer.AuditDynamicConfiguration != nil && !b.IsKubernetesGTE("1.13") { - b.Cluster.Spec.KubeAPIServer.AuditDynamicConfiguration = nil - } - // build the kube-apiserver flags for the service flags, err := flagbuilder.BuildFlagsList(b.Cluster.Spec.KubeAPIServer) if err != nil { diff --git a/nodeup/pkg/model/kube_apiserver_test.go b/nodeup/pkg/model/kube_apiserver_test.go index 287c5375dc777..38b621623f07d 100644 --- a/nodeup/pkg/model/kube_apiserver_test.go +++ b/nodeup/pkg/model/kube_apiserver_test.go @@ -17,54 +17,14 @@ limitations under the License. 
package model import ( - "bytes" - "strings" "testing" "k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/flagbuilder" "k8s.io/kops/upup/pkg/fi" - "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks" "k8s.io/kops/util/pkg/architectures" ) -func Test_KubeAPIServer_Builder(t *testing.T) { - basedir := "tests/apiServer/auditDynamicConfiguration" - - context := &fi.ModelBuilderContext{ - Tasks: make(map[string]fi.Task), - } - - nodeUpModelContext, err := BuildNodeupModelContext(basedir) - if err != nil { - t.Fatalf("error loading model %q: %v", basedir, err) - return - } - keystore := &fakeCAStore{} - keystore.T = t - nodeUpModelContext.KeyStore = keystore - - builder := KubeAPIServerBuilder{NodeupModelContext: nodeUpModelContext} - - err = builder.Build(context) - if err != nil { - t.Fatalf("error from KubeAPIServerBuilder buildKubeletConfig: %v", err) - return - } - if task, ok := context.Tasks["File//etc/kubernetes/manifests/kube-apiserver.manifest"]; !ok { - t.Error("did not find the kubernetes API manifest after the build") - } else { - nodeTask, _ := task.(*nodetasks.File) - reader, _ := nodeTask.Contents.Open() - buf := new(bytes.Buffer) - buf.ReadFrom(reader) - s := buf.String() - if strings.Contains(s, "--audit-dynamic-configuration") { - t.Error("Older versions of k8s should not have --audit-dynamic-configuration flag") - } - } -} - func Test_KubeAPIServer_BuildFlags(t *testing.T) { grid := []struct { config kops.KubeAPIServerConfig diff --git a/nodeup/pkg/model/kube_scheduler.go b/nodeup/pkg/model/kube_scheduler.go index c768f9a640db0..bade176c1d624 100644 --- a/nodeup/pkg/model/kube_scheduler.go +++ b/nodeup/pkg/model/kube_scheduler.go @@ -66,9 +66,8 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { if !b.IsMaster { return nil } - useConfigFile := b.IsKubernetesGTE("1.12") { - pod, err := b.buildPod(useConfigFile) + pod, err := b.buildPod() if err != nil { return fmt.Errorf("error building kube-scheduler pod: %v", err) } @@ -95,7 +94,7 @@ func (b *KubeSchedulerBuilder) Build(c *fi.ModelBuilderContext) error { Mode: s("0400"), }) } - if useConfigFile { + { var config *SchedulerConfig if b.IsKubernetesGTE("1.19") { config = NewSchedulerConfig("kubescheduler.config.k8s.io/v1beta1") @@ -142,19 +141,15 @@ func NewSchedulerConfig(apiVersion string) *SchedulerConfig { } // buildPod is responsible for constructing the pod specification -func (b *KubeSchedulerBuilder) buildPod(useConfigFile bool) (*v1.Pod, error) { +func (b *KubeSchedulerBuilder) buildPod() (*v1.Pod, error) { c := b.Cluster.Spec.KubeScheduler flags, err := flagbuilder.BuildFlagsList(c) if err != nil { return nil, fmt.Errorf("error building kube-scheduler flags: %v", err) } - if useConfigFile { - flags = append(flags, "--config="+"/var/lib/kube-scheduler/config.yaml") - } else { - // Add kubeconfig flag - flags = append(flags, "--kubeconfig="+defaultKubeConfig) - } + + flags = append(flags, "--config="+"/var/lib/kube-scheduler/config.yaml") if c.UsePolicyConfigMap != nil { flags = append(flags, "--policy-configmap=scheduler-policy", "--policy-configmap-namespace=kube-system") diff --git a/nodeup/pkg/model/networking/calico.go b/nodeup/pkg/model/networking/calico.go index aea39f9cc0c9b..cd2de3c01cd71 100644 --- a/nodeup/pkg/model/networking/calico.go +++ b/nodeup/pkg/model/networking/calico.go @@ -17,8 +17,6 @@ limitations under the License. 
package networking import ( - "path/filepath" - "k8s.io/kops/nodeup/pkg/model" "k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi/nodeup/nodetasks" @@ -43,24 +41,5 @@ func (b *CalicoBuilder) Build(c *fi.ModelBuilderContext) error { c.AddTask(&nodetasks.Package{Name: "wireguard"}) } - // @check if tls is enabled and if so, we need to download the client certificates - if b.IsKubernetesLT("1.12") && !b.UseEtcdManager() && b.UseEtcdTLS() { - name := "calico-client" - dirname := "calico" - ca := filepath.Join(dirname, "ca.pem") - certificate := filepath.Join(dirname, name+".pem") - key := filepath.Join(dirname, name+"-key.pem") - - if err := b.BuildCertificateTask(c, name, certificate, nil); err != nil { - return err - } - if err := b.BuildPrivateKeyTask(c, name, key, nil); err != nil { - return err - } - if err := b.BuildCertificateTask(c, fi.CertificateIDCA, ca, nil); err != nil { - return err - } - } - return nil } diff --git a/nodeup/pkg/model/tests/apiServer/auditDynamicConfiguration/cluster.yaml b/nodeup/pkg/model/tests/apiServer/auditDynamicConfiguration/cluster.yaml deleted file mode 100644 index 185a800322ca6..0000000000000 --- a/nodeup/pkg/model/tests/apiServer/auditDynamicConfiguration/cluster.yaml +++ /dev/null @@ -1,63 +0,0 @@ -apiVersion: kops.k8s.io/v1alpha2 -kind: Cluster -metadata: - creationTimestamp: "2016-12-10T22:42:27Z" - name: minimal.example.com -spec: - kubeAPIServer: - auditDynamicConfiguration: true - kubernetesApiAccess: - - 0.0.0.0/0 - channel: stable - cloudProvider: aws - configBase: memfs://clusters.example.com/minimal.example.com - etcdClusters: - - etcdMembers: - - instanceGroup: master-us-test-1a - name: master-us-test-1a - name: main - - etcdMembers: - - instanceGroup: master-us-test-1a - name: master-us-test-1a - name: events - kubelet: - featureGates: - ExperimentalCriticalPodAnnotation: "true" - AllowExtTrafficLocalEndpoints: "false" - podManifestPath: "/etc/kubernetes/manifests" - kubernetesVersion: v1.12.0 - masterInternalName: api.internal.minimal.example.com - masterPublicName: api.minimal.example.com - networkCIDR: 172.20.0.0/16 - networking: - kubenet: {} - nonMasqueradeCIDR: 100.64.0.0/10 - sshAccess: - - 0.0.0.0/0 - topology: - masters: public - nodes: public - subnets: - - cidr: 172.20.32.0/19 - name: us-test-1a - type: Public - zone: us-test-1a - ---- - -apiVersion: kops.k8s.io/v1alpha2 -kind: InstanceGroup -metadata: - creationTimestamp: "2016-12-10T22:42:28Z" - name: masters - labels: - kops.k8s.io/cluster: minimal.example.com -spec: - associatePublicIp: true - image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21 - machineType: t2.medium - maxSize: 1 - minSize: 1 - role: Master - subnets: - - us-test-1a diff --git a/pkg/apis/kops/cluster.go b/pkg/apis/kops/cluster.go index 853981ce42a2c..f5f62fea62108 100644 --- a/pkg/apis/kops/cluster.go +++ b/pkg/apis/kops/cluster.go @@ -453,8 +453,8 @@ var SupportedEtcdProviderTypes = []string{ type EtcdClusterSpec struct { // Name is the name of the etcd cluster (main, events etc) Name string `json:"name,omitempty"` - // Provider is the provider used to run etcd: standalone, manager. - // We default to manager for kubernetes 1.11 or if the manager is configured; otherwise standalone. + // Provider is the provider used to run etcd: Manager, Legacy. + // Defaults to Manager. 
Provider EtcdProviderType `json:"provider,omitempty"` // Members stores the configurations for each member of the cluster (including the data volume) Members []EtcdMemberSpec `json:"etcdMembers,omitempty"` diff --git a/pkg/apis/kops/v1alpha2/cluster.go b/pkg/apis/kops/v1alpha2/cluster.go index 45ef04c642469..853893847c66c 100644 --- a/pkg/apis/kops/v1alpha2/cluster.go +++ b/pkg/apis/kops/v1alpha2/cluster.go @@ -450,8 +450,8 @@ const ( type EtcdClusterSpec struct { // Name is the name of the etcd cluster (main, events etc) Name string `json:"name,omitempty"` - // Provider is the provider used to run etcd: standalone, manager. - // We default to manager for kubernetes 1.11 or if the manager is configured; otherwise standalone. + // Provider is the provider used to run etcd: Manager, Legacy. + // Defaults to Manager. Provider EtcdProviderType `json:"provider,omitempty"` // Members stores the configurations for each member of the cluster (including the data volume) Members []EtcdMemberSpec `json:"etcdMembers,omitempty"` diff --git a/pkg/apis/kops/validation/openstack.go b/pkg/apis/kops/validation/openstack.go index af94646606dac..8d1cbf53794d8 100644 --- a/pkg/apis/kops/validation/openstack.go +++ b/pkg/apis/kops/validation/openstack.go @@ -34,8 +34,5 @@ func openstackValidateCluster(c *kops.Cluster) (errList field.ErrorList) { errList = append(errList, field.Forbidden(field.NewPath("spec", "topology", "masters"), "Public topology requires an external network")) } } - if c.Spec.ExternalCloudControllerManager != nil && !c.IsKubernetesGTE("1.13") { - errList = append(errList, field.Forbidden(field.NewPath("spec", "cloudControllerManager"), "External cloud controller manager for OpenStack is only supported as of kubernetes 1.13")) - } return errList } diff --git a/pkg/apis/kops/validation/validation.go b/pkg/apis/kops/validation/validation.go index 442afdad61a18..cbe0f06c9a1b0 100644 --- a/pkg/apis/kops/validation/validation.go +++ b/pkg/apis/kops/validation/validation.go @@ -702,15 +702,12 @@ func validateNetworkingCilium(cluster *kops.Cluster, v *kops.CiliumNetworkingSpe allErrs = append(allErrs, field.Invalid(versionFld, v.Version, "Only versions 1.6 through 1.8 are supported")) } - if version.Minor == 6 && (!cluster.IsKubernetesGTE("1.11") || cluster.IsKubernetesGTE("1.16")) { - allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.6 requires kubernetesVersion between 1.11 and 1.16")) + if version.Minor == 6 && cluster.IsKubernetesGTE("1.16") { + allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.6 requires kubernetesVersion before 1.16")) } - if version.Minor == 7 && (!cluster.IsKubernetesGTE("1.12") || cluster.IsKubernetesGTE("1.17")) { - allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.7 requires kubernetesVersion between 1.12 and 1.17")) - } - if version.Minor == 8 && !cluster.IsKubernetesGTE("1.12") { - allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.8 requires kubernetesVersion 1.12 or newer")) + if version.Minor == 7 && cluster.IsKubernetesGTE("1.17") { + allErrs = append(allErrs, field.Forbidden(versionFld, "Version 1.7 requires kubernetesVersion before 1.17")) } if v.Hubble != nil && fi.BoolValue(v.Hubble.Enabled) { @@ -894,7 +891,7 @@ func validateEtcdVersion(spec kops.EtcdClusterSpec, fieldPath *field.Path, minim version := spec.Version if spec.Version == "" { - version = components.DefaultEtcd2Version + version = components.DefaultEtcd3Version_1_13 } sem, err := semver.Parse(strings.TrimPrefix(version, "v")) @@ -902,15 
+899,15 @@ func validateEtcdVersion(spec kops.EtcdClusterSpec, fieldPath *field.Path, minim return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "the storage version is invalid")} } - // we only support v3 and v2 for now - if sem.Major == 3 || sem.Major == 2 { + // we only support v3 for now + if sem.Major == 3 { if sem.LT(*minimalVersion) { return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, fmt.Sprintf("minimum version required is %s", minimalVersion.String()))} } return nil } - return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "unsupported storage version, we only support major versions 2 and 3")} + return field.ErrorList{field.Invalid(fieldPath.Child("version"), version, "unsupported storage version, we only support major version 3")} } // validateEtcdMemberSpec is responsible for validate the cluster member @@ -1172,10 +1169,6 @@ func validateNodeLocalDNS(spec *kops.ClusterSpec, fldpath *field.Path) field.Err } func validateClusterAutoscaler(cluster *kops.Cluster, spec *kops.ClusterAutoscalerConfig, fldPath *field.Path) (allErrs field.ErrorList) { - if !cluster.IsKubernetesGTE("1.12") { - allErrs = append(allErrs, field.Forbidden(fldPath, "Cluster autoscaler requires kubernetesVersion 1.12 or higher")) - } - allErrs = append(allErrs, IsValidValue(fldPath.Child("expander"), spec.Expander, []string{"least-waste", "random", "most-pods"})...) if kops.CloudProviderID(cluster.Spec.CloudProvider) == kops.CloudProviderOpenstack { diff --git a/pkg/apis/kops/validation/validation_test.go b/pkg/apis/kops/validation/validation_test.go index f915e896279bb..0c1edb0c9511b 100644 --- a/pkg/apis/kops/validation/validation_test.go +++ b/pkg/apis/kops/validation/validation_test.go @@ -618,28 +618,10 @@ func Test_Validate_Cilium(t *testing.T) { Version: "v1.0.0", }, Spec: kops.ClusterSpec{ - KubernetesVersion: "1.11.0", + KubernetesVersion: "1.18.0", }, ExpectedErrors: []string{"Invalid value::cilium.version"}, }, - { - Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.7.0", - }, - Spec: kops.ClusterSpec{ - KubernetesVersion: "1.11.0", - }, - ExpectedErrors: []string{"Forbidden::cilium.version"}, - }, - { - Cilium: kops.CiliumNetworkingSpec{ - Version: "v1.7.0-rc1", - }, - Spec: kops.ClusterSpec{ - KubernetesVersion: "1.11.0", - }, - ExpectedErrors: []string{"Forbidden::cilium.version"}, - }, { Cilium: kops.CiliumNetworkingSpec{ Version: "v1.7.0", @@ -683,7 +665,7 @@ func Test_Validate_Cilium(t *testing.T) { Cilium: &g.Cilium, } if g.Spec.KubernetesVersion == "" { - g.Spec.KubernetesVersion = "1.12.0" + g.Spec.KubernetesVersion = "1.15.0" } cluster := &kops.Cluster{ Spec: g.Spec, diff --git a/pkg/model/components/apiserver.go b/pkg/model/components/apiserver.go index b2e4b79ae0d3f..3b95304394c0d 100644 --- a/pkg/model/components/apiserver.go +++ b/pkg/model/components/apiserver.go @@ -171,25 +171,7 @@ func (b *KubeAPIServerOptionsBuilder) BuildOptions(o interface{}) error { // TODO: We can probably rewrite these more clearly in descending order // Based on recommendations from: // https://kubernetes.io/docs/admin/admission-controllers/#is-there-a-recommended-set-of-admission-controllers-to-use - if b.IsKubernetesLT("1.12") { - c.EnableAdmissionPlugins = []string{ - "Initializers", - "NamespaceLifecycle", - "LimitRanger", - "ServiceAccount", - "PersistentVolumeLabel", - "DefaultStorageClass", - "DefaultTolerationSeconds", - "MutatingAdmissionWebhook", - "ValidatingAdmissionWebhook", - "NodeRestriction", - "ResourceQuota", - } - 
c.EnableAdmissionPlugins = append(c.EnableAdmissionPlugins, c.AppendAdmissionPlugins...) - } - // Based on recommendations from: - // https://kubernetes.io/docs/admin/admission-controllers/#is-there-a-recommended-set-of-admission-controllers-to-use - if b.IsKubernetesGTE("1.12") { + { c.EnableAdmissionPlugins = []string{ "NamespaceLifecycle", "LimitRanger", diff --git a/pkg/model/components/cilium.go b/pkg/model/components/cilium.go index 71dce1f930575..d20147161d284 100644 --- a/pkg/model/components/cilium.go +++ b/pkg/model/components/cilium.go @@ -39,11 +39,7 @@ func (b *CiliumOptionsBuilder) BuildOptions(o interface{}) error { } if c.Version == "" { - if b.Context.IsKubernetesLT("1.12.0") { - c.Version = "v1.6.12" - } else { - c.Version = "v1.8.4" - } + c.Version = "v1.8.4" } version, _ := semver.ParseTolerant(c.Version) diff --git a/pkg/model/components/docker.go b/pkg/model/components/docker.go index db524ea15cc70..70e57280afd7c 100644 --- a/pkg/model/components/docker.go +++ b/pkg/model/components/docker.go @@ -51,10 +51,8 @@ func (b *DockerOptionsBuilder) BuildOptions(o interface{}) error { docker.Version = fi.String("19.03.13") } else if b.IsKubernetesGTE("1.16") { docker.Version = fi.String("18.09.9") - } else if b.IsKubernetesGTE("1.12") { - docker.Version = fi.String("18.06.3") } else { - docker.Version = fi.String("17.03.2") + docker.Version = fi.String("18.06.3") } } diff --git a/pkg/model/components/etcd.go b/pkg/model/components/etcd.go index a14d8f383d8d6..f434fbd00ef7a 100644 --- a/pkg/model/components/etcd.go +++ b/pkg/model/components/etcd.go @@ -34,11 +34,6 @@ type EtcdOptionsBuilder struct { var _ loader.OptionsBuilder = &EtcdOptionsBuilder{} const ( - DefaultEtcd2Version = "2.2.1" - - // 1.11 originally recommended 3.2.18, but there was an advisory to update to 3.2.24 - DefaultEtcd3Version_1_11 = "3.2.24" - DefaultEtcd3Version_1_13 = "3.2.24" DefaultEtcd3Version_1_14 = "3.3.10" @@ -53,13 +48,7 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error { for i := range spec.EtcdClusters { c := &spec.EtcdClusters[i] if c.Provider == "" { - if b.IsKubernetesGTE("1.12") { - c.Provider = kops.EtcdProviderTypeManager - } else if c.Manager != nil { - c.Provider = kops.EtcdProviderTypeManager - } else { - c.Provider = kops.EtcdProviderTypeLegacy - } + c.Provider = kops.EtcdProviderTypeManager } // Ensure the version is set @@ -69,27 +58,23 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error { c.Version = DefaultEtcd3Version_1_17 } else if b.IsKubernetesGTE("1.14") { c.Version = DefaultEtcd3Version_1_14 - } else if b.IsKubernetesGTE("1.13") { - c.Version = DefaultEtcd3Version_1_13 } else { - c.Version = DefaultEtcd2Version + c.Version = DefaultEtcd3Version_1_13 } } if c.Version == "" && c.Provider == kops.EtcdProviderTypeManager { - // From 1.11, we run the k8s-recommended versions of etcd when using the manager + // We run the k8s-recommended versions of etcd when using the manager if b.IsKubernetesGTE("1.17") { c.Version = DefaultEtcd3Version_1_17 } else if b.IsKubernetesGTE("1.14") { c.Version = DefaultEtcd3Version_1_14 - } else if b.IsKubernetesGTE("1.13") { - c.Version = DefaultEtcd3Version_1_13 } else { - c.Version = DefaultEtcd3Version_1_11 + c.Version = DefaultEtcd3Version_1_13 } } - // From 1.12, we enable TLS if we're running EtcdManager & etcd3 + // We enable TLS if we're running EtcdManager & etcd3 // // (Moving to etcd3 is a disruptive upgrade, so we // force TLS at the same time as we enable @@ -106,7 +91,7 @@ func (b *EtcdOptionsBuilder) 
BuildOptions(o interface{}) error { return fmt.Errorf("unexpected etcd version %q", c.Version) } - if b.IsKubernetesGTE("1.12.0") && etcdV3 { + if etcdV3 { c.EnableEtcdTLS = true c.EnableTLSAuth = true } diff --git a/pkg/model/components/etcdmanager/options.go b/pkg/model/components/etcdmanager/options.go index f19be87d98d10..efc27f5502e41 100644 --- a/pkg/model/components/etcdmanager/options.go +++ b/pkg/model/components/etcdmanager/options.go @@ -75,7 +75,7 @@ func (b *EtcdManagerOptionsBuilder) BuildOptions(o interface{}) error { return nil } -var supportedEtcdVersions = []string{"2.2.1", "3.1.12", "3.2.18", "3.2.24", "3.3.10", "3.3.13", "3.3.17", "3.4.3"} +var supportedEtcdVersions = []string{"3.1.12", "3.2.18", "3.2.24", "3.3.10", "3.3.13", "3.3.17", "3.4.3"} func etcdVersionIsSupported(version string) bool { version = strings.TrimPrefix(version, "v") diff --git a/pkg/model/components/openstack.go b/pkg/model/components/openstack.go index 27e12c1c4683c..6583cef656a6a 100644 --- a/pkg/model/components/openstack.go +++ b/pkg/model/components/openstack.go @@ -52,10 +52,8 @@ func (b *OpenStackOptionsBulder) BuildOptions(o interface{}) error { clusterSpec.CloudConfig.Openstack.BlockStorage.CreateStorageClass = fi.Bool(true) } - if b.Context.IsKubernetesGTE("1.13.0") { - if clusterSpec.ExternalCloudControllerManager == nil { - clusterSpec.ExternalCloudControllerManager = &kops.CloudControllerManagerConfig{} - } + if clusterSpec.ExternalCloudControllerManager == nil { + clusterSpec.ExternalCloudControllerManager = &kops.CloudControllerManagerConfig{} } return nil diff --git a/pkg/model/context.go b/pkg/model/context.go index f946cddc3d2d7..caa7bced0bafd 100644 --- a/pkg/model/context.go +++ b/pkg/model/context.go @@ -411,5 +411,5 @@ func (m *KopsModelContext) NodePortRange() (utilnet.PortRange, error) { // UseServiceAccountIAM returns true if we are using service-account bound IAM roles. func (m *KopsModelContext) UseServiceAccountIAM() bool { - return featureflag.UseServiceAccountIAM.Enabled() && m.IsKubernetesGTE("1.12") + return featureflag.UseServiceAccountIAM.Enabled() } diff --git a/pkg/model/firewall.go b/pkg/model/firewall.go index d7d9b3872b17f..2e834d7b0f537 100644 --- a/pkg/model/firewall.go +++ b/pkg/model/firewall.go @@ -250,12 +250,6 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu } if b.Cluster.Spec.Networking.Calico != nil { - if b.IsKubernetesLT("1.12") { - // Calico needs to access etcd - // TODO: Remove, replace with etcd in calico manifest - klog.Warningf("Opening etcd port on masters for access from the nodes, for calico. 
This is unsafe in untrusted environments.") - tcpBlocked[4001] = false - } protocols = append(protocols, ProtocolIPIP) } diff --git a/pkg/model/iam/iam_builder.go b/pkg/model/iam/iam_builder.go index aa4c0b3a15b9b..63bd049be334e 100644 --- a/pkg/model/iam/iam_builder.go +++ b/pkg/model/iam/iam_builder.go @@ -569,24 +569,6 @@ func ReadableStatePaths(cluster *kops.Cluster, role Subject) ([]string, error) { paths = append(paths, "/pki/private/kube-router/*") } - // @check if calico is enabled as the CNI provider and permit access to the client TLS certificate by default - if cluster.IsKubernetesLT("1.12") && networkingSpec.Calico != nil { - calicoClientCert := false - for _, x := range cluster.Spec.EtcdClusters { - if x.Provider == kops.EtcdProviderTypeManager { - calicoClientCert = false - break - } - if x.EnableEtcdTLS { - calicoClientCert = true - } - } - - if calicoClientCert { - paths = append(paths, "/pki/private/calico-client/*") - } - } - // @check if cilium is enabled as the CNI provider and permit access to the cilium etc client TLS certificate by default // As long as the Cilium Etcd cluster exists, we should do this if networkingSpec.Cilium != nil && model.UseCiliumEtcd(cluster) { diff --git a/pkg/model/pki.go b/pkg/model/pki.go index bfa5c5cdafef8..7fd69873fdb99 100644 --- a/pkg/model/pki.go +++ b/pkg/model/pki.go @@ -127,17 +127,6 @@ func (b *PKIModelBuilder) Build(c *fi.ModelBuilderContext) error { Type: "client", Signer: defaultCA, }) - - // @check if calico is enabled as the CNI provider - if b.IsKubernetesLT("1.12") && b.KopsModelContext.Cluster.Spec.Networking.Calico != nil { - c.AddTask(&fitasks.Keypair{ - Name: fi.String("calico-client"), - Lifecycle: b.Lifecycle, - Subject: "cn=calico-client", - Type: "client", - Signer: defaultCA, - }) - } } if b.KopsModelContext.Cluster.Spec.Networking.Kuberouter != nil && !b.UseKopsControllerForNodeBootstrap() { diff --git a/upup/models/bindata.go b/upup/models/bindata.go index 61ae496e7b848..48c47bfb9b456 100644 --- a/upup/models/bindata.go +++ b/upup/models/bindata.go @@ -2,60 +2,42 @@ // sources: // upup/models/cloudup/resources/addons/OWNERS // upup/models/cloudup/resources/addons/anonymous-issuer-discovery.addons.k8s.io/k8s-1.16.yaml.template -// upup/models/cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template // upup/models/cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml -// upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml // upup/models/cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template // upup/models/cloudup/resources/addons/core.addons.k8s.io/addon.yaml // upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template // upup/models/cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml // upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template // upup/models/cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template // upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template // upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/README.md // 
upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template // upup/models/cloudup/resources/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml.template // upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template // upup/models/cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml // upup/models/cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml // upup/models/cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml // upup/models/cloudup/resources/addons/metadata-proxy.addons.k8s.io/addon.yaml // upup/models/cloudup/resources/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml // upup/models/cloudup/resources/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml.template -// upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template // upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template // upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template // upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template // upup/models/cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template // upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml -// upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml // upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template // upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template -// upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template -// upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template // upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template // upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template -// upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template // upup/models/cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template -// upup/models/cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template -// upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template // upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template // upup/models/cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template // upup/models/cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template -// upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template // upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template // 
upup/models/cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml // upup/models/cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml @@ -169,92 +151,6 @@ func cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate return a, nil } -var _cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplate = []byte(`--- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - namespace: kube-system - name: aws-iam-authenticator - labels: - k8s-app: aws-iam-authenticator -spec: - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" - labels: - k8s-app: aws-iam-authenticator - spec: - # run on the host network (don't depend on CNI) - hostNetwork: true - - # run on each master node - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - key: CriticalAddonsOnly - operator: Exists - - # run ` + "`" + `aws-iam-authenticator server` + "`" + ` with three volumes - # - config (mounted from the ConfigMap at /etc/aws-iam-authenticator/config.yaml) - # - state (persisted TLS certificate and keys, mounted from the host) - # - output (output kubeconfig to plug into your apiserver configuration, mounted from the host) - containers: - - name: aws-iam-authenticator - image: {{ or .Authentication.Aws.Image "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-iam-authenticator:v0.4.0" }} - args: - - server - - --config=/etc/aws-iam-authenticator/config.yaml - - --state-dir=/var/aws-iam-authenticator - - --kubeconfig-pregenerated=true - - resources: - requests: - memory: {{ or .Authentication.Aws.MemoryRequest "20Mi" }} - cpu: {{ or .Authentication.Aws.CPURequest "10m" }} - limits: - memory: {{ or .Authentication.Aws.MemoryLimit "20Mi" }} - cpu: {{ or .Authentication.Aws.CPULimit "100m" }} - - volumeMounts: - - name: config - mountPath: /etc/aws-iam-authenticator/ - - name: state - mountPath: /var/aws-iam-authenticator/ - - name: output - mountPath: /etc/kubernetes/aws-iam-authenticator/ - - volumes: - - name: config - configMap: - name: aws-iam-authenticator - - name: output - hostPath: - path: /srv/kubernetes/aws-iam-authenticator/ - - name: state - hostPath: - path: /srv/kubernetes/aws-iam-authenticator/ -`) - -func cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplate, nil -} - -func cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - var _cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate = []byte(`--- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition @@ -677,925 +573,553 @@ func cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml() (*asset, error) { return a, nil } -var _cloudupResourcesAddonsAuthenticationKopeIoK8s18Yaml = []byte(`apiVersion: v1 -kind: Namespace +var _cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate = []byte(`{{ with .ClusterAutoscaler }} +# Sourced from https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws/examples +--- +apiVersion: v1 +kind: ServiceAccount metadata: - 
name: kopeio-auth labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" - + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler + name: cluster-autoscaler + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-autoscaler + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["events", "endpoints"] + verbs: ["create", "patch"] + - apiGroups: [""] + resources: ["pods/eviction"] + verbs: ["create"] + - apiGroups: [""] + resources: ["pods/status"] + verbs: ["update"] + - apiGroups: [""] + resources: ["endpoints"] + resourceNames: ["cluster-autoscaler"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["watch", "list", "get", "update"] + - apiGroups: [""] + resources: + - "pods" + - "services" + - "replicationcontrollers" + - "persistentvolumeclaims" + - "persistentvolumes" + verbs: ["watch", "list", "get"] + - apiGroups: ["extensions"] + resources: ["replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["watch", "list"] + - apiGroups: ["apps"] + resources: ["statefulsets", "replicasets", "daemonsets"] + verbs: ["watch", "list", "get"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "csinodes"] + verbs: ["watch", "list", "get"] + - apiGroups: ["batch", "extensions"] + resources: ["jobs"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] + - apiGroups: ["coordination.k8s.io"] + resourceNames: ["cluster-autoscaler"] + resources: ["leases"] + verbs: ["get", "update"] --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +rules: + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["create","list","watch"] + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] + verbs: ["delete", "get", "update", "watch"] -apiVersion: v1 -kind: Service +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: - name: auth-api - namespace: kopeio-auth + name: cluster-autoscaler labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - selector: - app: auth-api - ports: - - port: 443 - targetPort: 9002 + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system --- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cluster-autoscaler + namespace: kube-system + labels: + k8s-addon: cluster-autoscaler.addons.k8s.io + k8s-app: cluster-autoscaler +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cluster-autoscaler +subjects: + - kind: ServiceAccount + name: cluster-autoscaler + namespace: kube-system -apiVersion: extensions/v1beta1 -kind: DaemonSet +--- +apiVersion: apps/v1 +kind: Deployment metadata: - name: auth-api - namespace: kopeio-auth + name: cluster-autoscaler + namespace: kube-system labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" + app: 
cluster-autoscaler spec: + replicas: 1 + selector: + matchLabels: + app: cluster-autoscaler template: metadata: labels: - app: auth-api - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' + app: cluster-autoscaler spec: - serviceAccountName: auth-api - hostNetwork: true - nodeSelector: - node-role.kubernetes.io/master: "" + serviceAccountName: cluster-autoscaler tolerations: - - effect: NoSchedule + - operator: "Exists" key: node-role.kubernetes.io/master - - key: "CriticalAddonsOnly" - operator: "Exists" + nodeSelector: + node-role.kubernetes.io/master: "" containers: - - name: auth-api - image: kopeio/auth-api:1.0.20171125 - imagePullPolicy: Always - ports: - - containerPort: 9001 - command: - - /auth-api - - --listen=127.0.0.1:9001 - - --secure-port=9002 - - --etcd-servers=http://127.0.0.1:4001 - - --v=8 - - --storage-backend=etcd2 - ---- - -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1alpha1.auth.kope.io - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - insecureSkipTLSVerify: true - group: auth.kope.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: auth-api - namespace: kopeio-auth - version: v1alpha1 + - image: {{ .Image }} + name: cluster-autoscaler + resources: + limits: + cpu: 100m + memory: 300Mi + requests: + cpu: 100m + memory: 300Mi + command: + - ./cluster-autoscaler + - --balance-similar-node-groups={{ .BalanceSimilarNodeGroups }} + - --cloud-provider={{ $.CloudProvider }} + - --expander={{ .Expander }} + {{ range $name, $spec := GetNodeInstanceGroups }} + - --nodes={{ $spec.MinSize }}:{{ $spec.MaxSize }}:{{ $name }}.{{ ClusterName }} + {{ end }} + - --scale-down-utilization-threshold={{ .ScaleDownUtilizationThreshold }} + - --skip-nodes-with-local-storage={{ .SkipNodesWithLocalStorage }} + - --skip-nodes-with-system-pods={{ .SkipNodesWithSystemPods }} + - --stderrthreshold=info + - --v=2 + ports: + - containerPort: 8085 + protocol: TCP + livenessProbe: + failureThreshold: 3 + httpGet: + path: /health-check + port: 8085 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 +{{ end }}`) ---- +func cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate, nil +} -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1alpha1.config.auth.kope.io - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - insecureSkipTLSVerify: true - group: config.auth.kope.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: auth-api - namespace: kopeio-auth - version: v1alpha1 +func cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplateBytes() + if err != nil { + return nil, err + } ---- + info := bindataFileInfo{name: "cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} -kind: ServiceAccount -apiVersion: v1 +var _cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml = []byte(`kind: Addons metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" + name: core +spec: + addons: + - version: 1.4.0 + selector: + k8s-addon: 
core.addons.k8s.io + manifest: v1.4.0.yaml ---- +`) -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kopeio-auth:auth-api:auth-reader - namespace: kube-system - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: auth-api - namespace: kopeio-auth +func cloudupResourcesAddonsCoreAddonsK8sIoAddonYamlBytes() ([]byte, error) { + return _cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml, nil +} ---- +func cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml() (*asset, error) { + bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoAddonYamlBytes() + if err != nil { + return nil, err + } -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding + info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole metadata: - name: kopeio-auth:system:auth-delegator + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: auth-api - namespace: kopeio-auth + kubernetes.io/bootstrapping: rbac-defaults + name: system:cloud-controller-manager +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - nodes + verbs: + - '*' +- apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update +- apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create +- apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - secrets + verbs: + - list --- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 +apiVersion: v1 +kind: ServiceAccount metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -rules: -- apiGroups: ["auth.kope.io"] - resources: ["users"] - verbs: ["get", "list", "watch"] + name: cloud-controller-manager + namespace: kube-system --- -apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" + name: system:cloud-controller-manager roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: auth-api + name: system:cloud-controller-manager subjects: - kind: ServiceAccount - name: auth-api - namespace: kopeio-auth -`) - -func cloudupResourcesAddonsAuthenticationKopeIoK8s18YamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsAuthenticationKopeIoK8s18Yaml, nil -} - -func cloudupResourcesAddonsAuthenticationKopeIoK8s18Yaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsAuthenticationKopeIoK8s18YamlBytes() - 
if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} + name: cloud-controller-manager + namespace: kube-system -var _cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate = []byte(`{{ with .ClusterAutoscaler }} -# Sourced from https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws/examples --- -apiVersion: v1 -kind: ServiceAccount + +apiVersion: apps/v1 +kind: DaemonSet metadata: labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler - name: cluster-autoscaler + k8s-app: cloud-controller-manager + name: cloud-controller-manager namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["events", "endpoints"] - verbs: ["create", "patch"] - - apiGroups: [""] - resources: ["pods/eviction"] - verbs: ["create"] - - apiGroups: [""] - resources: ["pods/status"] - verbs: ["update"] - - apiGroups: [""] - resources: ["endpoints"] - resourceNames: ["cluster-autoscaler"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["nodes"] - verbs: ["watch", "list", "get", "update"] - - apiGroups: [""] - resources: - - "pods" - - "services" - - "replicationcontrollers" - - "persistentvolumeclaims" - - "persistentvolumes" - verbs: ["watch", "list", "get"] - - apiGroups: ["extensions"] - resources: ["replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["policy"] - resources: ["poddisruptionbudgets"] - verbs: ["watch", "list"] - - apiGroups: ["apps"] - resources: ["statefulsets", "replicasets", "daemonsets"] - verbs: ["watch", "list", "get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses", "csinodes"] - verbs: ["watch", "list", "get"] - - apiGroups: ["batch", "extensions"] - resources: ["jobs"] - verbs: ["get", "list", "watch", "patch"] - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create"] - - apiGroups: ["coordination.k8s.io"] - resourceNames: ["cluster-autoscaler"] - resources: ["leases"] - verbs: ["get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -rules: - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["create","list","watch"] - - apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"] - verbs: ["delete", "get", "update", "watch"] - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cluster-autoscaler - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cluster-autoscaler -subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - k8s-addon: cluster-autoscaler.addons.k8s.io - k8s-app: cluster-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: cluster-autoscaler 
-subjects: - - kind: ServiceAccount - name: cluster-autoscaler - namespace: kube-system - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: cluster-autoscaler - namespace: kube-system - labels: - app: cluster-autoscaler spec: - replicas: 1 selector: matchLabels: - app: cluster-autoscaler + k8s-app: cloud-controller-manager template: metadata: labels: - app: cluster-autoscaler + k8s-app: cloud-controller-manager spec: - serviceAccountName: cluster-autoscaler - tolerations: - - operator: "Exists" - key: node-role.kubernetes.io/master nodeSelector: node-role.kubernetes.io/master: "" + priorityClassName: system-node-critical + serviceAccountName: cloud-controller-manager containers: - - image: {{ .Image }} - name: cluster-autoscaler - resources: - limits: - cpu: 100m - memory: 300Mi - requests: - cpu: 100m - memory: 300Mi - command: - - ./cluster-autoscaler - - --balance-similar-node-groups={{ .BalanceSimilarNodeGroups }} - - --cloud-provider={{ $.CloudProvider }} - - --expander={{ .Expander }} - {{ range $name, $spec := GetNodeInstanceGroups }} - - --nodes={{ $spec.MinSize }}:{{ $spec.MaxSize }}:{{ $name }}.{{ ClusterName }} - {{ end }} - - --scale-down-utilization-threshold={{ .ScaleDownUtilizationThreshold }} - - --skip-nodes-with-local-storage={{ .SkipNodesWithLocalStorage }} - - --skip-nodes-with-system-pods={{ .SkipNodesWithSystemPods }} - - --stderrthreshold=info - - --v=2 - ports: - - containerPort: 8085 - protocol: TCP - livenessProbe: - failureThreshold: 3 - httpGet: - path: /health-check - port: 8085 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -{{ end }}`) + - name: cloud-controller-manager + # for in-tree providers we use k8s.gcr.io/cloud-controller-manager + # this can be replaced with any other image for out-of-tree providers + image: k8s.gcr.io/cloud-controller-manager:v{{ .KubernetesVersion }}
+ command: + - /usr/local/bin/cloud-controller-manager + - --cloud-provider={{ .CloudProvider }} + - --leader-elect=true + - --use-service-account-credentials + # these flags will vary for every cloud provider + - --allocate-node-cidrs=true + - --configure-cloud-routes=true + - --cluster-cidr={{ .KubeControllerManager.ClusterCIDR }} + volumeMounts: + - name: ca-certificates + mountPath: /etc/ssl/certs + hostNetwork: true + dnsPolicy: Default + volumes: + - name: ca-certificates + hostPath: + path: /etc/ssl/certs + tolerations: + # this is required so CCM can bootstrap itself + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + # this is to have the daemonset runnable on master nodes + # the taint may vary depending on your cluster setup + - key: node-role.kubernetes.io/master + effect: NoSchedule + # this is to restrict CCM to only run on master nodes + # the node selector may vary depending on your cluster setup + - key: "CriticalAddonsOnly" + operator: "Exists" -func cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate, nil +`) + +func cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate, nil } -func cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplateBytes() +func cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplateBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml = []byte(`kind: Addons +var _cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml = []byte(`--- +apiVersion: v1 +kind: Namespace metadata: - name: core -spec: - addons: - - version: 1.4.0 - selector: - k8s-addon: core.addons.k8s.io - manifest: v1.4.0.yaml - + name: kube-system `) -func cloudupResourcesAddonsCoreAddonsK8sIoAddonYamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml, nil +func cloudupResourcesAddonsCoreAddonsK8sIoV140YamlBytes() ([]byte, error) { + return _cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml, nil } -func cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoAddonYamlBytes() +func cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml() (*asset, error) { + bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoV140YamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: rbac.authorization.k8s.io/v1 +var 
_cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + k8s-addon: coredns.addons.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" labels: kubernetes.io/bootstrapping: rbac-defaults - name: system:cloud-controller-manager + k8s-addon: coredns.addons.k8s.io + name: system:coredns rules: - apiGroups: - "" resources: - - events + - endpoints + - services + - pods + - namespaces verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - nodes - verbs: - - '*' -- apiGroups: - - "" - resources: - - services - verbs: - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - get - list - - update - watch - apiGroups: - "" resources: - - serviceaccounts - verbs: - - create -- apiGroups: - - "" - resources: - - persistentvolumes + - nodes verbs: - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - list - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cloud-controller-manager - namespace: kube-system - --- - -kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: - name: system:cloud-controller-manager + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + k8s-addon: coredns.addons.k8s.io + name: system:coredns roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: system:cloud-controller-manager + name: system:coredns subjects: - kind: ServiceAccount - name: cloud-controller-manager + name: coredns namespace: kube-system - --- - -apiVersion: apps/v1 -kind: DaemonSet +apiVersion: v1 +kind: ConfigMap metadata: + name: coredns + namespace: kube-system labels: - k8s-app: cloud-controller-manager - name: cloud-controller-manager + addonmanager.kubernetes.io/mode: EnsureExists +data: + Corefile: | + {{- if KubeDNS.ExternalCoreFile }} +{{ KubeDNS.ExternalCoreFile | indent 4 }} + {{- else }} + .:53 { + errors + health { + lameduck 5s + } + kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + forward . /etc/resolv.conf { + max_concurrent 1000 + } + loop + cache 30 + loadbalance + reload + } + {{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns-autoscaler namespace: kube-system + labels: + k8s-addon: coredns.addons.k8s.io + k8s-app: coredns-autoscaler + kubernetes.io/cluster-service: "true" spec: selector: matchLabels: - k8s-app: cloud-controller-manager + k8s-app: coredns-autoscaler template: metadata: labels: - k8s-app: cloud-controller-manager + k8s-app: coredns-autoscaler + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' spec: - nodeSelector: - node-role.kubernetes.io/master: "" - priorityClassName: system-node-critical - serviceAccountName: cloud-controller-manager containers: - - name: cloud-controller-manager - # for in-tree providers we use k8s.gcr.io/cloud-controller-manager - # this can be replaced with any other image for out-of-tree providers - image: k8s.gcr.io/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work? 
+ - name: autoscaler + image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3 + resources: + requests: + cpu: "20m" + memory: "10Mi" command: - - /usr/local/bin/cloud-controller-manager - - --cloud-provider={{ .CloudProvider }} - - --leader-elect=true - - --use-service-account-credentials - # these flags will vary for every cloud provider - - --allocate-node-cidrs=true - - --configure-cloud-routes=true - - --cluster-cidr={{ .KubeControllerManager.ClusterCIDR }} - volumeMounts: - - name: ca-certificates - mountPath: /etc/ssl/certs - hostNetwork: true - dnsPolicy: Default - volumes: - - name: ca-certificates - hostPath: - path: /etc/ssl/certs + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 + priorityClassName: system-cluster-critical tolerations: - # this is required so CCM can bootstrap itself - - key: node.cloudprovider.kubernetes.io/uninitialized - value: "true" - effect: NoSchedule - # this is to have the daemonset runnable on master nodes - # the taint may vary depending on your cluster setup - - key: node-role.kubernetes.io/master - effect: NoSchedule - # this is to restrict CCM to only run on master nodes - # the node selector may vary depending on your cluster setup - key: "CriticalAddonsOnly" operator: "Exists" - -`) - -func cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate, nil -} - -func cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplate = []byte(`apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole + serviceAccountName: coredns-autoscaler +--- +apiVersion: apps/v1 +kind: Deployment metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - name: system:cloud-controller-manager -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - nodes - verbs: - - '*' -- apiGroups: - - "" - resources: - - services - verbs: - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create -- apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - list - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cloud-controller-manager - namespace: kube-system - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: system:cloud-controller-manager -roleRef: 
- apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:cloud-controller-manager -subjects: -- kind: ServiceAccount - name: cloud-controller-manager - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - labels: - k8s-app: cloud-controller-manager - name: cloud-controller-manager - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: cloud-controller-manager - template: - metadata: - labels: - k8s-app: cloud-controller-manager - spec: - nodeSelector: - node-role.kubernetes.io/master: "" - serviceAccountName: cloud-controller-manager - containers: - - name: cloud-controller-manager - # for in-tree providers we use k8s.gcr.io/cloud-controller-manager - # this can be replaced with any other image for out-of-tree providers - image: k8s.gcr.io/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work? - command: - - /usr/local/bin/cloud-controller-manager - - --cloud-provider={{ .CloudProvider }} - - --leader-elect=true - - --use-service-account-credentials - # these flags will vary for every cloud provider - - --allocate-node-cidrs=true - - --configure-cloud-routes=true - - --cluster-cidr={{ .KubeControllerManager.ClusterCIDR }} - volumeMounts: - - name: ca-certificates - mountPath: /etc/ssl/certs - hostNetwork: true - dnsPolicy: Default - volumes: - - name: ca-certificates - hostPath: - path: /etc/ssl/certs - tolerations: - # this is required so CCM can bootstrap itself - - key: node.cloudprovider.kubernetes.io/uninitialized - value: "true" - effect: NoSchedule - # this is to have the daemonset runnable on master nodes - # the taint may vary depending on your cluster setup - - key: node-role.kubernetes.io/master - effect: NoSchedule - # this is to restrict CCM to only run on master nodes - # the node selector may vary depending on your cluster setup - - key: "CriticalAddonsOnly" - operator: "Exists" - -`) - -func cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplate, nil -} - -func cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml = []byte(`--- -apiVersion: v1 -kind: Namespace -metadata: - name: kube-system -`) - -func cloudupResourcesAddonsCoreAddonsK8sIoV140YamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml, nil -} - -func cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsCoreAddonsK8sIoV140YamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - k8s-addon: coredns.addons.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - kubernetes.io/bootstrapping: 
rbac-defaults - k8s-addon: coredns.addons.k8s.io - name: system:coredns -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - k8s-addon: coredns.addons.k8s.io - name: system:coredns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:coredns -subjects: -- kind: ServiceAccount - name: coredns - namespace: kube-system ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: EnsureExists -data: - Corefile: | - {{- if KubeDNS.ExternalCoreFile }} -{{ KubeDNS.ExternalCoreFile | indent 4 }} - {{- else }} - .:53 { - errors - health { - lameduck 5s - } - kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - } - prometheus :9153 - forward . /etc/resolv.conf { - max_concurrent 1000 - } - loop - cache 30 - loadbalance - reload - } - {{- end }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns-autoscaler - namespace: kube-system - labels: - k8s-addon: coredns.addons.k8s.io - k8s-app: coredns-autoscaler - kubernetes.io/cluster-service: "true" -spec: - selector: - matchLabels: - k8s-app: coredns-autoscaler - template: - metadata: - labels: - k8s-app: coredns-autoscaler - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - containers: - - name: autoscaler - image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3 - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=coredns-autoscaler - - --target=Deployment/coredns - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. 
- - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} - - --logtostderr=true - - --v=2 - priorityClassName: system-cluster-critical - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - serviceAccountName: coredns-autoscaler ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns - namespace: kube-system + name: coredns + namespace: kube-system labels: k8s-app: kube-dns k8s-addon: coredns.addons.k8s.io @@ -1802,220 +1326,6 @@ func cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate() (*asset, error return a, nil } -var _cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplate = []byte(`apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - k8s-addon: coredns.addons.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - kubernetes.io/bootstrapping: rbac-defaults - k8s-addon: coredns.addons.k8s.io - name: system:coredns -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - k8s-addon: coredns.addons.k8s.io - name: system:coredns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:coredns -subjects: -- kind: ServiceAccount - name: coredns - namespace: kube-system ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: EnsureExists -data: - Corefile: | - {{- if KubeDNS.ExternalCoreFile }} -{{ KubeDNS.ExternalCoreFile | indent 4 }} - {{- else }} - .:53 { - errors - health { - lameduck 5s - } - kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - } - prometheus :9153 - forward . 
/etc/resolv.conf { - max_concurrent 1000 - } - loop - cache 30 - loadbalance - reload - } - {{- end }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns - namespace: kube-system - labels: - k8s-app: kube-dns - k8s-addon: coredns.addons.k8s.io - kubernetes.io/cluster-service: "true" -spec: - replicas: 2 - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - spec: - priorityClassName: system-cluster-critical - serviceAccountName: coredns - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - name: coredns - image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.7.0{{ end }} - imagePullPolicy: IfNotPresent - resources: - limits: - memory: {{ KubeDNS.MemoryLimit }} - requests: - cpu: {{ KubeDNS.CPURequest }} - memory: {{ KubeDNS.MemoryRequest }} - args: [ "-conf", "/etc/coredns/Corefile" ] - volumeMounts: - - name: config-volume - mountPath: /etc/coredns - readOnly: true - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9153 - name: metrics - protocol: TCP - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - all - readOnlyRootFilesystem: true - livenessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - dnsPolicy: Default - volumes: - - name: config-volume - configMap: - name: coredns - items: - - key: Corefile - path: Corefile ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - annotations: - prometheus.io/port: "9153" - prometheus.io/scrape: "true" - labels: - k8s-addon: coredns.addons.k8s.io - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: {{ KubeDNS.ServerIP }} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - - name: metrics - port: 9153 - protocol: TCP -`) - -func cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplate, nil -} - -func cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - var _cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate = []byte(`--- apiVersion: v1 kind: Secret @@ -2340,185 +1650,52 @@ func cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate() (*asset, return a, nil } -var _cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplate = []byte(`kind: Deployment -apiVersion: extensions/v1beta1 +var _cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd = []byte(`# ExternalDNS + +ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS providers. 
+ +## What it does + +Inspired by [Kubernetes DNS](https://github.com/kubernetes/dns), Kubernetes' cluster-internal DNS server, ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the [Kubernetes API](https://kubernetes.io/docs/api/) to determine a desired list of DNS records. *Unlike* KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly—e.g. [AWS Route 53](https://aws.amazon.com/route53/) or [Google CloudDNS](https://cloud.google.com/dns/docs/). + +In a broader sense, ExternalDNS allows you to control DNS records dynamically via Kubernetes resources in a DNS provider-agnostic way. + +## Deploying to a Cluster + +The following tutorials are provided: + +* [AWS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md) +* [Azure](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/azure.md) +* [Cloudflare](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/cloudflare.md) +* [DigitalOcean](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/digitalocean.md) +* Google Container Engine + * [Using Google's Default Ingress Controller](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/gke.md) + * [Using the Nginx Ingress Controller](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/nginx-ingress.md) +* [FAQ](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/faq.md) + +## Github repository + +Source code is managed under kubernetes-incubator at [external-dns](https://github.com/kubernetes-incubator/external-dns).`) + +func cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMdBytes() ([]byte, error) { + return _cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd, nil +} + +func cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd() (*asset, error) { + bytes, err := cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMdBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cloudup/resources/addons/external-dns.addons.k8s.io/README.md", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: apps/v1 +kind: Deployment metadata: - name: dns-controller - namespace: kube-system - labels: - k8s-addon: dns-controller.addons.k8s.io - k8s-app: dns-controller - version: v1.19.0-alpha.5 -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: dns-controller - template: - metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - k8s-app: dns-controller - version: v1.19.0-alpha.5 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]' - spec: - tolerations: - - key: "node-role.kubernetes.io/master" - effect: NoSchedule - nodeSelector: - node-role.kubernetes.io/master: "" - dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) - hostNetwork: true - serviceAccount: dns-controller - containers: - - name: dns-controller - image: k8s.gcr.io/kops/dns-controller:1.19.0-alpha.5 - command: -{{ range $arg := DnsControllerArgv }} - - "{{ $arg }}" -{{ end }} -{{- if .EgressProxy }} - env: -{{ range $name, 
$value := ProxyEnv }} - - name: {{ $name }} - value: {{ $value }} -{{ end }} -{{- end }} -{{- if eq .CloudProvider "digitalocean" }} - env: - - name: DIGITALOCEAN_ACCESS_TOKEN - valueFrom: - secretKeyRef: - name: digitalocean - key: access-token -{{- end }} - resources: - requests: - cpu: 50m - memory: 50Mi - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: dns-controller - namespace: kube-system - labels: - k8s-addon: dns-controller.addons.k8s.io - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - name: kops:dns-controller -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - ingress - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - "extensions" - resources: - - ingresses - verbs: - - get - - list - - watch - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - name: kops:dns-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kops:dns-controller -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: system:serviceaccount:kube-system:dns-controller -`) - -func cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplate, nil -} - -func cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd = []byte(`# ExternalDNS - -ExternalDNS synchronizes exposed Kubernetes Services and Ingresses with DNS providers. - -## What it does - -Inspired by [Kubernetes DNS](https://github.com/kubernetes/dns), Kubernetes' cluster-internal DNS server, ExternalDNS makes Kubernetes resources discoverable via public DNS servers. Like KubeDNS, it retrieves a list of resources (Services, Ingresses, etc.) from the [Kubernetes API](https://kubernetes.io/docs/api/) to determine a desired list of DNS records. *Unlike* KubeDNS, however, it's not a DNS server itself, but merely configures other DNS providers accordingly—e.g. [AWS Route 53](https://aws.amazon.com/route53/) or [Google CloudDNS](https://cloud.google.com/dns/docs/). - -In a broader sense, ExternalDNS allows you to control DNS records dynamically via Kubernetes resources in a DNS provider-agnostic way. 
- -## Deploying to a Cluster - -The following tutorials are provided: - -* [AWS](https://github.com/kubernetes-sigs/external-dns/blob/master/docs/tutorials/aws.md) -* [Azure](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/azure.md) -* [Cloudflare](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/cloudflare.md) -* [DigitalOcean](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/digitalocean.md) -* Google Container Engine - * [Using Google's Default Ingress Controller](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/gke.md) - * [Using the Nginx Ingress Controller](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/tutorials/nginx-ingress.md) -* [FAQ](https://github.com/kubernetes-incubator/external-dns/blob/master/docs/faq.md) - -## Github repository - -Source code is managed under kubernetes-incubator at [external-dns](https://github.com/kubernetes-incubator/external-dns).`) - -func cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMdBytes() ([]byte, error) { - return _cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd, nil -} - -func cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd() (*asset, error) { - bytes, err := cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMdBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/external-dns.addons.k8s.io/README.md", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate = []byte(`apiVersion: apps/v1 -kind: Deployment -metadata: - name: external-dns + name: external-dns namespace: kube-system labels: k8s-addon: external-dns.addons.k8s.io @@ -2623,215 +1800,106 @@ func cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate() (*asset, e return a, nil } -var _cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplate = []byte(`apiVersion: extensions/v1beta1 -kind: Deployment +var _cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate = []byte(`apiVersion: v1 +kind: ConfigMap metadata: - name: external-dns + name: kops-controller namespace: kube-system labels: - k8s-addon: external-dns.addons.k8s.io - k8s-app: external-dns - version: v0.4.4 + k8s-addon: kops-controller.addons.k8s.io +data: + config.yaml: | + {{ KopsControllerConfig }} + +--- + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: kops-controller + namespace: kube-system + labels: + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.19.0-alpha.5 spec: - replicas: 1 selector: matchLabels: - k8s-app: external-dns + k8s-app: kops-controller + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 template: metadata: labels: - k8s-addon: external-dns.addons.k8s.io - k8s-app: external-dns - version: v0.4.4 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]' + k8s-addon: kops-controller.addons.k8s.io + k8s-app: kops-controller + version: v1.19.0-alpha.5 spec: - serviceAccount: external-dns + priorityClassName: system-node-critical tolerations: - key: "node-role.kubernetes.io/master" - effect: NoSchedule + operator: Exists nodeSelector: node-role.kubernetes.io/master: "" + kops.k8s.io/kops-controller-pki: "" 
dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) hostNetwork: true + serviceAccount: kops-controller containers: - - name: external-dns - image: registry.opensource.zalan.do/teapot/external-dns:v0.4.4 - args: -{{ range $arg := ExternalDnsArgv }} + - name: kops-controller + image: k8s.gcr.io/kops/kops-controller:1.19.0-alpha.5 + volumeMounts: +{{ if .UseHostCertificates }} + - mountPath: /etc/ssl/certs + name: etc-ssl-certs + readOnly: true +{{ end }} + - mountPath: /etc/kubernetes/kops-controller/config/ + name: kops-controller-config + - mountPath: /etc/kubernetes/kops-controller/pki/ + name: kops-controller-pki + command: +{{ range $arg := KopsControllerArgv }} - "{{ $arg }}" {{ end }} +{{- if KopsSystemEnv }} + env: +{{ range $var := KopsSystemEnv }} + - name: {{ $var.Name }} + value: {{ $var.Value }} +{{ end }} +{{- end }} resources: requests: cpu: 50m memory: 50Mi + securityContext: + runAsNonRoot: true + volumes: +{{ if .UseHostCertificates }} + - hostPath: + path: /etc/ssl/certs + type: DirectoryOrCreate + name: etc-ssl-certs +{{ end }} + - name: kops-controller-config + configMap: + name: kops-controller + - name: kops-controller-pki + hostPath: + path: /etc/kubernetes/kops-controller/ + type: Directory --- apiVersion: v1 kind: ServiceAccount metadata: - name: external-dns + name: kops-controller namespace: kube-system labels: - k8s-addon: external-dns.addons.k8s.io - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: external-dns.addons.k8s.io - name: kops:external-dns -rules: -- apiGroups: - - "" - resources: - - services - verbs: - - list -- apiGroups: - - extensions - resources: - - ingresses - verbs: - - list - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: external-dns.addons.k8s.io - name: kops:external-dns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kops:external-dns -subjects: -- kind: ServiceAccount - name: external-dns - namespace: kube-system -`) - -func cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplate, nil -} - -func cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate = []byte(`apiVersion: v1 -kind: ConfigMap -metadata: - name: kops-controller - namespace: kube-system - labels: - k8s-addon: kops-controller.addons.k8s.io -data: - config.yaml: | - {{ KopsControllerConfig }} - ---- - -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: kops-controller - namespace: kube-system - labels: - k8s-addon: kops-controller.addons.k8s.io - k8s-app: kops-controller - version: v1.19.0-alpha.5 -spec: - selector: - matchLabels: - k8s-app: kops-controller - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-addon: kops-controller.addons.k8s.io - k8s-app: kops-controller - version: v1.19.0-alpha.5 - spec: - priorityClassName: system-node-critical - tolerations: - - key: 
"node-role.kubernetes.io/master" - operator: Exists - nodeSelector: - node-role.kubernetes.io/master: "" - kops.k8s.io/kops-controller-pki: "" - dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) - hostNetwork: true - serviceAccount: kops-controller - containers: - - name: kops-controller - image: k8s.gcr.io/kops/kops-controller:1.19.0-alpha.5 - volumeMounts: -{{ if .UseHostCertificates }} - - mountPath: /etc/ssl/certs - name: etc-ssl-certs - readOnly: true -{{ end }} - - mountPath: /etc/kubernetes/kops-controller/config/ - name: kops-controller-config - - mountPath: /etc/kubernetes/kops-controller/pki/ - name: kops-controller-pki - command: -{{ range $arg := KopsControllerArgv }} - - "{{ $arg }}" -{{ end }} -{{- if KopsSystemEnv }} - env: -{{ range $var := KopsSystemEnv }} - - name: {{ $var.Name }} - value: {{ $var.Value }} -{{ end }} -{{- end }} - resources: - requests: - cpu: 50m - memory: 50Mi - securityContext: - runAsNonRoot: true - volumes: -{{ if .UseHostCertificates }} - - hostPath: - path: /etc/ssl/certs - type: DirectoryOrCreate - name: etc-ssl-certs -{{ end }} - - name: kops-controller-config - configMap: - name: kops-controller - - name: kops-controller-pki - hostPath: - path: /etc/kubernetes/kops-controller/ - type: Directory ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kops-controller - namespace: kube-system - labels: - k8s-addon: kops-controller.addons.k8s.io + k8s-addon: kops-controller.addons.k8s.io --- @@ -3301,419 +2369,91 @@ func cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate() (*asset, error return a, nil } -var _cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplate = []byte(`# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -{{- if or (.KubeDNS.UpstreamNameservers) (.KubeDNS.StubDomains) }} -apiVersion: v1 -kind: ConfigMap +var _cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml = []byte(`--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding metadata: - name: kube-dns - namespace: kube-system -data: - {{- if .KubeDNS.UpstreamNameservers }} - upstreamNameservers: | - {{ ToJSON .KubeDNS.UpstreamNameservers }} - {{- end }} - {{- if .KubeDNS.StubDomains }} - stubDomains: | - {{ ToJSON .KubeDNS.StubDomains }} - {{- end }} + name: kops:system:kubelet-api-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:kubelet-api-admin +subjects: +# TODO: perhaps change the client cerificate, place into a group and using a group selector instead? 
+- apiGroup: rbac.authorization.k8s.io + kind: User + name: kubelet-api +`) ---- -{{- end }} +func cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19YamlBytes() ([]byte, error) { + return _cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml, nil +} -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - k8s-app: kube-dns-autoscaler - kubernetes.io/cluster-service: "true" -spec: - template: - metadata: - labels: - k8s-app: kube-dns-autoscaler - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - name: autoscaler - image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3 - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=kube-dns-autoscaler - # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - - --target=Deployment/kube-dns - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. - - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} - - --logtostderr=true - - --v=2 - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - serviceAccountName: kube-dns-autoscaler +func cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml() (*asset, error) { + bytes, err := cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19YamlBytes() + if err != nil { + return nil, err + } ---- + info := bindataFileInfo{name: "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} -apiVersion: extensions/v1beta1 -kind: Deployment +var _cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml = []byte(`kind: Addons metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" + name: limit-range spec: - # replicas: not specified here: - # 1. In order to make Addon Manager do not reconcile this replicas parameter. - # 2. Default is 1. - # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. - strategy: - rollingUpdate: - maxSurge: 10% - maxUnavailable: 0 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - prometheus.io/scrape: 'true' - prometheus.io/port: '10055' - spec: - dnsPolicy: Default # Don't use cluster DNS. - serviceAccountName: kube-dns - volumes: - - name: kube-dns-config - configMap: - name: kube-dns - optional: true - - containers: - - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns:1.15.13 - resources: - # TODO: Set memory limits when we've profiled the container for large - # clusters, then set request = limit to keep this container in - # guaranteed class. Currently, this container falls into the - # "burstable" category so the kubelet doesn't backoff from restarting it. 
- limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - livenessProbe: - httpGet: - path: /healthcheck/kubedns - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /readiness - port: 8081 - scheme: HTTP - # we poll on pod startup for the Kubernetes master service and - # only setup the /readiness HTTP server once that's available. - initialDelaySeconds: 3 - timeoutSeconds: 5 - args: - - --config-dir=/kube-dns-config - - --dns-port=10053 - - --domain={{ KubeDNS.Domain }}. - - --v=2 - env: - - name: PROMETHEUS_PORT - value: "10055" - ports: - - containerPort: 10053 - name: dns-local - protocol: UDP - - containerPort: 10053 - name: dns-tcp-local - protocol: TCP - - containerPort: 10055 - name: metrics - protocol: TCP - volumeMounts: - - name: kube-dns-config - mountPath: /kube-dns-config + addons: + - version: 1.5.0 + selector: + k8s-addon: limit-range.addons.k8s.io + manifest: v1.5.0.yaml +`) - - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.15.13 - livenessProbe: - httpGet: - path: /healthcheck/dnsmasq - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - -v=2 - - -logtostderr - - -configDir=/etc/k8s/dns/dnsmasq-nanny - - -restartDnsmasq=true - - -- - - -k - - --cache-size={{ KubeDNS.CacheMaxSize }} - - --dns-forward-max={{ KubeDNS.CacheMaxConcurrent }} - - --no-negcache - - --log-facility=- - - --server=/{{ KubeDNS.Domain }}/127.0.0.1#10053 - - --server=/in-addr.arpa/127.0.0.1#10053 - - --server=/in6.arpa/127.0.0.1#10053 - - --min-port=1024 - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - # see: https://github.com/kubernetes/kubernetes/issues/29055 for details - resources: - requests: - cpu: 150m - memory: 20Mi - volumeMounts: - - name: kube-dns-config - mountPath: /etc/k8s/dns/dnsmasq-nanny +func cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYamlBytes() ([]byte, error) { + return _cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml, nil +} - - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar:1.15.13 - livenessProbe: - httpGet: - path: /metrics - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - --v=2 - - --logtostderr - - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A - - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A - ports: - - containerPort: 10054 - name: metrics - protocol: TCP - resources: - requests: - memory: 20Mi - cpu: 10m +func cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml() (*asset, error) { + bytes, err := cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYamlBytes() + if err != nil { + return nil, err + } ---- + info := bindataFileInfo{name: "cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} -apiVersion: v1 -kind: Service +var _cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml = []byte(`apiVersion: "v1" +kind: "LimitRange" metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeDNS" + name: "limits" + namespace: default spec: - selector: - k8s-app: kube-dns - clusterIP: {{ KubeDNS.ServerIP }} - ports: - - 
name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: kube-dns.addons.k8s.io - name: kube-dns-autoscaler -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list"] - - apiGroups: [""] - resources: ["replicationcontrollers/scale"] - verbs: ["get", "update"] - - apiGroups: ["extensions"] - resources: ["deployments/scale", "replicasets/scale"] - verbs: ["get", "update"] -# Remove the configmaps rule once below issue is fixed: -# kubernetes-incubator/cluster-proportional-autoscaler#16 - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "create"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: kube-dns.addons.k8s.io - name: kube-dns-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-dns-autoscaler -subjects: -- kind: ServiceAccount - name: kube-dns-autoscaler - namespace: kube-system + limits: + - type: "Container" + defaultRequest: + cpu: "100m" `) -func cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplate, nil +func cloudupResourcesAddonsLimitRangeAddonsK8sIoV150YamlBytes() ([]byte, error) { + return _cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml, nil } -func cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplateBytes() +func cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml() (*asset, error) { + bytes, err := cloudupResourcesAddonsLimitRangeAddonsK8sIoV150YamlBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml = []byte(`--- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kops:system:kubelet-api-admin -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:kubelet-api-admin -subjects: -# TODO: perhaps change the client cerificate, place into a group and using a group selector instead? 
-- apiGroup: rbac.authorization.k8s.io - kind: User - name: kubelet-api -`) - -func cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19YamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml, nil -} - -func cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19YamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml = []byte(`kind: Addons -metadata: - name: limit-range -spec: - addons: - - version: 1.5.0 - selector: - k8s-addon: limit-range.addons.k8s.io - manifest: v1.5.0.yaml -`) - -func cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml, nil -} - -func cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml = []byte(`apiVersion: "v1" -kind: "LimitRange" -metadata: - name: "limits" - namespace: default -spec: - limits: - - type: "Container" - defaultRequest: - cpu: "100m" -`) - -func cloudupResourcesAddonsLimitRangeAddonsK8sIoV150YamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml, nil -} - -func cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsLimitRangeAddonsK8sIoV150YamlBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml = []byte(`kind: Addons +var _cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml = []byte(`kind: Addons metadata: name: metadata-proxy spec: @@ -4044,155 +2784,6 @@ func cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate() (*asset, return a, nil } -var _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplate = []byte(`# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.3.3/config/v1.3/aws-k8s-cni.yaml - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: aws-node -rules: -- apiGroups: - - crd.k8s.amazonaws.com - resources: - - "*" - - namespaces - verbs: - - "*" -- apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: ["list", "watch", "get"] -- apiGroups: ["extensions"] - resources: - - daemonsets - verbs: ["list", "watch"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: aws-node - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: aws-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: aws-node -subjects: -- kind: ServiceAccount - name: aws-node - namespace: kube-system ---- -kind: DaemonSet -apiVersion: 
extensions/v1beta1 -metadata: - name: aws-node - namespace: kube-system - labels: - k8s-app: aws-node -spec: - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - k8s-app: aws-node - template: - metadata: - labels: - k8s-app: aws-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - serviceAccountName: aws-node - hostNetwork: true - priorityClassName: system-node-critical - tolerations: - - operator: Exists - containers: - - image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}" - ports: - - containerPort: 61678 - name: metrics - name: aws-node - env: - - name: CLUSTER_NAME - value: {{ ClusterName }} - - name: AWS_VPC_K8S_CNI_LOGLEVEL - value: DEBUG - - name: MY_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- range .Networking.AmazonVPC.Env }} - - name: {{ .Name }} - value: "{{ .Value }}" - {{- end }} - resources: - requests: - cpu: 10m - securityContext: - privileged: true - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /host/var/log - name: log-dir - - mountPath: /var/run/docker.sock - name: dockersock - volumes: - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: log-dir - hostPath: - path: /var/log - - name: dockersock - hostPath: - path: /var/run/docker.sock ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: eniconfigs.crd.k8s.amazonaws.com -spec: - scope: Cluster - group: crd.k8s.amazonaws.com - version: v1alpha1 - names: - plural: eniconfigs - singular: eniconfig - kind: ENIConfig -`) - -func cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - var _cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate = []byte(`# Vendored from https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.5.5/config/v1.5/aws-k8s-cni.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole @@ -6387,2441 +4978,1255 @@ func cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate() (*asset, error return a, nil } -var _cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplate = []byte(`apiVersion: v1 -kind: ConfigMap +var _cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate = []byte(`# Pulled and modified from: https://raw.githubusercontent.com/coreos/flannel/v0.13.0/Documentation/kube-flannel.yml + +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy metadata: - name: cilium-config - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" -data: -{{ with .Networking.Cilium }} - # Identity allocation mode selects how identities are shared between cilium - # nodes by setting how they are stored. The options are "crd" or "kvstore". 
- # - "crd" stores identities in kubernetes as CRDs (custom resource definition). - # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in a kvstore, etcd or consul, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - identity-allocation-mode: crd - # If you want to run cilium in debug mode change this value to true - debug: "{{- if .Debug -}}true{{- else -}}false{{- end -}}" - {{ if .EnablePrometheusMetrics }} - # If you want metrics enabled in all of your Cilium agents, set the port for - # which the Cilium agents will have their metrics exposed. - # This option deprecates the "prometheus-serve-addr" in the - # "cilium-metrics-config" ConfigMap - # NOTE that this will open the port on ALL nodes where Cilium pods are - # scheduled. - prometheus-serve-addr: ":{{- or .AgentPrometheusPort "9090" }}" - {{ end }} - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. - enable-ipv4: "true" - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. - enable-ipv6: "false" - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: "{{- if eq .MonitorAggregation "" -}}medium{{- else -}}{{ .MonitorAggregation }}{{- end -}}" - # ct-global-max-entries-* specifies the maximum number of connections - # supported across all endpoints, split by protocol: tcp or other. One pair - # of maps uses these values for IPv4 connections, and another pair of maps - # use these values for IPv6 connections. - # - # If these values are modified, then during the next Cilium startup the - # tracking of ongoing connections may be disrupted. This may lead to brief - # policy drops or a change in loadbalancing decisions for a connection. - # - # For users upgrading from Cilium 1.2 or earlier, to minimize disruption - # during the upgrade process, comment out these options. - bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" - bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # This may lead to policy drops or a change in loadbalancing decisions for a - # connection for some time. Endpoints may need to be recreated to restore - # connectivity. - # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. 
- preallocate-bpf-maps: "{{ .PreallocateBPFMaps }}" - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve - tunnel: "{{ .Tunnel }}" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: "{{ .ClusterName }}" - - # This option is disabled by default starting from version 1.4.x in favor - # of a more powerful DNS proxy-based implementation, see [0] for details. - # Enable this option if you want to use FQDN policies but do not want to use - # the DNS proxy. - # - # To ease upgrade, users may opt to set this option to "true". - # Otherwise please refer to the Upgrade Guide [1] which explains how to - # prepare policy rules for upgrade. - # - # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based - # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action - tofqdns-enable-poller: "{{ .ToFqdnsEnablePoller }}" - # wait-bpf-mount makes init container wait until bpf filesystem is mounted - wait-bpf-mount: "false" - # Enable fetching of container-runtime specific metadata - # - # By default, the Kubernetes pod and namespace labels are retrieved and - # associated with endpoints for identification purposes. By integrating - # with the container runtime, container runtime specific labels can be - # retrieved, such labels will be prefixed with container: - # - # CAUTION: The container runtime labels can include information such as pod - # annotations which may result in each pod being associated a unique set of - # labels which can result in excessive security identities being allocated. - # Please review the labels filter when enabling container runtime labels. - # - # Supported values: - # - containerd - # - crio - # - docker - # - none - # - auto (automatically detect the container runtime) - # - container-runtime: "{{ .ContainerRuntimeLabels }}" - masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}" - install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}" - auto-direct-node-routes: "{{- if .AutoDirectNodeRoutes -}}true{{- else -}}false{{- end -}}" - enable-node-port: "{{ .EnableNodePort }}" - {{ with .Ipam }} - ipam: {{ . }} - {{ if eq . 
"eni" }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" - {{ end }} - {{ end }} -{{ end }} # With .Networking.Cilium end ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium-operator - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" + name: psp.flannel.unprivileged + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default + seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default + apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default + apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default +spec: + privileged: false + volumes: + - configMap + - secret + - emptyDir + - hostPath + allowedHostPaths: + - pathPrefix: "/dev/net" + - pathPrefix: "/etc/cni/net.d" + - pathPrefix: "/etc/kube-flannel" + - pathPrefix: "/run/flannel" + readOnlyRootFilesystem: false + # Users and groups + runAsUser: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + fsGroup: + rule: RunAsAny + # Privilege Escalation + allowPrivilegeEscalation: false + defaultAllowPrivilegeEscalation: false + # Capabilities + allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] + defaultAddCapabilities: [] + requiredDropCapabilities: [] + # Host namespaces + hostPID: false + hostIPC: false + hostNetwork: true + hostPorts: + - min: 0 + max: 65535 + # SELinux + seLinux: + # SELinux is unused in CaaSP + rule: 'RunAsAny' --- -apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: cilium + name: flannel labels: role.kubernetes.io/networking: "1" rules: -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - get - - list - - watch +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['psp.flannel.unprivileged'] - apiGroups: - "" resources: - - namespaces - - services - - nodes - - endpoints - - componentstatuses + - pods verbs: - get - - list - - watch - apiGroups: - "" resources: - - pods - nodes verbs: - - get - list - watch - - update - apiGroups: - "" resources: - - nodes - nodes/status verbs: - patch -- apiGroups: - - extensions - resources: - - ingresses - verbs: - - create - - get - - list - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - get - - list - - watch - - update -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumendpoints - - ciliumendpoints/status - - ciliumnodes - - ciliumnodes/status - - ciliumidentities - - ciliumidentities/status - verbs: - - '*' --- +kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole metadata: - name: cilium-operator - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - # to automatically delete [core|kube]dns pods so that are starting to being - # managed by Cilium - - pods - verbs: - - get - - list - - watch - - delete -- apiGroups: - - "" - resources: - # to automatically read from k8s and import the node's pod CIDR to cilium's - # etcd so all nodes know how to reach another pod running in a different - # node. 
- - nodes - # to perform the translation of a CNP that contains ` + "`" + `ToGroup` + "`" + ` to its endpoints - - services - - endpoints - # to check apiserver connectivity - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumendpoints - - ciliumendpoints/status - - ciliumnodes - - ciliumnodes/status - - ciliumidentities - - ciliumidentities/status - verbs: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium + name: flannel labels: role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: cilium + name: flannel subjects: - kind: ServiceAccount - name: cilium + name: flannel namespace: kube-system --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +apiVersion: v1 +kind: ServiceAccount metadata: - name: cilium-operator + name: flannel + namespace: kube-system labels: role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium-operator -subjects: -- kind: ServiceAccount - name: cilium-operator +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg namespace: kube-system + labels: + k8s-app: flannel + tier: node + app: flannel + role.kubernetes.io/networking: "1" +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "{{ .NonMasqueradeCIDR }}", + "Backend": { + "Type": "{{ FlannelBackendType }}" + } + } --- apiVersion: apps/v1 kind: DaemonSet metadata: + name: kube-flannel-ds + namespace: kube-system labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" + k8s-app: flannel + tier: node + app: flannel role.kubernetes.io/networking: "1" - name: cilium - namespace: kube-system spec: selector: matchLabels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" + tier: node + app: flannel + role.kubernetes.io/networking: "1" template: metadata: - annotations: - # This annotation plus the CriticalAddonsOnly toleration makes - # cilium to be a critical pod in the cluster, which ensures cilium - # gets priority scheduling. 
- # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ - scheduler.alpha.kubernetes.io/critical-pod: "" - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" + tier: node + app: flannel + role.kubernetes.io/networking: "1" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + hostNetwork: true + priorityClassName: system-node-critical + tolerations: + - operator: Exists + serviceAccountName: flannel + initContainers: + - name: install-cni + image: quay.io/coreos/flannel:v0.13.0 + command: + - cp + args: + - -f + - /etc/kube-flannel/cni-conf.json + - /etc/cni/net.d/10-flannel.conflist + volumeMounts: + - name: cni + mountPath: /etc/cni/net.d + - name: flannel-cfg + mountPath: /etc/kube-flannel/ containers: - - args: - - --config-dir=/tmp/cilium/config-map + - name: kube-flannel + image: quay.io/coreos/flannel:v0.13.0 command: - - cilium-agent + - /opt/bin/flanneld + args: + - --ip-masq + - --kube-subnet-mgr + - --iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }} + resources: + limits: + memory: 100Mi + requests: + cpu: 100m + memory: 100Mi + securityContext: + privileged: false + capabilities: + add: ["NET_ADMIN", "NET_RAW"] env: - - name: K8S_NODE_NAME + - name: POD_NAME valueFrom: fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE + fieldPath: metadata.name + - name: POD_NAMESPACE valueFrom: fieldRef: - apiVersion: v1 fieldPath: metadata.namespace - - name: CILIUM_FLANNEL_MASTER_DEVICE - valueFrom: - configMapKeyRef: - key: flannel-master-device - name: cilium-config - optional: true - - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT - valueFrom: - configMapKeyRef: - key: flannel-uninstall-on-exit - name: cilium-config - optional: true - - name: CILIUM_CLUSTERMESH_CONFIG - value: /var/lib/cilium/clustermesh/ - - name: CILIUM_CNI_CHAINING_MODE - valueFrom: - configMapKeyRef: - key: cni-chaining-mode - name: cilium-config - optional: true - - name: CILIUM_CUSTOM_CNI_CONF - valueFrom: - configMapKeyRef: - key: custom-cni-conf - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ .MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - {{ with .Networking.Cilium.EnablePolicy }} - - name: CILIUM_ENABLE_POLICY - value: {{ . }} - {{ end }} -{{ with .Networking.Cilium }} - image: "docker.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - lifecycle: - postStart: - exec: - command: - - /cni-install.sh - preStop: - exec: - command: - - /cni-uninstall.sh - livenessProbe: - exec: - command: - - cilium - - status - - --brief - failureThreshold: 10 - # The initial delay for the liveness probe is intentionally large to - # avoid an endless kill & restart cycle if in the event that the initial - # bootstrapping takes longer than expected. 
- initialDelaySeconds: 120 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - name: cilium-agent - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: {{ .AgentPrometheusPort }} - hostPort: {{ .AgentPrometheusPort }} - name: prometheus - protocol: TCP - {{ end }} - readinessProbe: - exec: - command: - - cilium - - status - - --brief - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - securityContext: - capabilities: - add: - - NET_ADMIN - - SYS_MODULE - privileged: true volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - - mountPath: /var/run/cilium - name: cilium-run - - mountPath: /host/opt/cni/bin - name: cni-path - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets - readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true - # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - hostNetwork: true - initContainers: - - command: - - /init-container.sh - env: - - name: CILIUM_ALL_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-state - name: cilium-config - optional: true - - name: CILIUM_BPF_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-bpf-state - name: cilium-config - optional: true - - name: CILIUM_WAIT_BPF_MOUNT - valueFrom: - configMapKeyRef: - key: wait-bpf-mount - name: cilium-config - optional: true - image: "docker.io/cilium/cilium:{{ .Version }}" -## end of ` + "`" + `with .Networking.Cilium` + "`" + ` -#{{ end }} - imagePullPolicy: IfNotPresent - name: clean-cilium-state - securityContext: - capabilities: - add: - - NET_ADMIN - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - - mountPath: /var/run/cilium - name: cilium-run - restartPolicy: Always - serviceAccount: cilium - serviceAccountName: cilium - terminationGracePeriodSeconds: 1 - tolerations: - - operator: Exists + - name: run + mountPath: /run/flannel + - name: dev-net + mountPath: /dev/net + - name: flannel-cfg + mountPath: /etc/kube-flannel/ volumes: - # To keep state between restarts / upgrades - - hostPath: - path: /var/run/cilium - type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: - path: /sys/fs/bpf - type: DirectoryOrCreate - name: bpf-maps - # To install cilium cni plugin in the host - - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate - name: cni-path - # To install cilium cni configuration in the host - - hostPath: + - name: run + hostPath: + path: /run/flannel + - name: dev-net + hostPath: + path: /dev/net + - name: cni + hostPath: path: /etc/cni/net.d - type: DirectoryOrCreate - name: etc-cni-netd - # To be able to load kernel modules - - hostPath: - path: /lib/modules - name: lib-modules - # To access iptables concurrently with other processes (e.g. 
kube-proxy) - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration - - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path - updateStrategy: - rollingUpdate: - maxUnavailable: 2 - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - io.cilium/app: operator - name: cilium-operator - role.kubernetes.io/networking: "1" - name: cilium-operator - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - io.cilium/app: operator - name: cilium-operator - spec: - containers: - - args: - - --debug=$(CILIUM_DEBUG) -{{ with .Networking.Cilium }} - {{ if .EnablePrometheusMetrics }} - - --enable-metrics - {{ end }} -{{ end }} - command: - - cilium-operator - env: - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: cilium-config - optional: true - - name: CILIUM_CLUSTER_NAME - valueFrom: - configMapKeyRef: - key: cluster-name - name: cilium-config - optional: true - - name: CILIUM_CLUSTER_ID - valueFrom: - configMapKeyRef: - key: cluster-id - name: cilium-config - optional: true - - name: CILIUM_IPAM - valueFrom: - configMapKeyRef: - key: ipam - name: cilium-config - optional: true - - name: CILIUM_DISABLE_ENDPOINT_CRD - valueFrom: - configMapKeyRef: - key: disable-endpoint-crd - name: cilium-config - optional: true - - name: CILIUM_KVSTORE - valueFrom: - configMapKeyRef: - key: kvstore - name: cilium-config - optional: true - - name: CILIUM_KVSTORE_OPT - valueFrom: - configMapKeyRef: - key: kvstore-opt - name: cilium-config - optional: true - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: AWS_ACCESS_KEY_ID - name: cilium-aws - optional: true - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AWS_SECRET_ACCESS_KEY - name: cilium-aws - optional: true - - name: AWS_DEFAULT_REGION - valueFrom: - secretKeyRef: - key: AWS_DEFAULT_REGION - name: cilium-aws - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ .MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" -{{ with .Networking.Cilium }} - image: "docker.io/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: 6942 - hostPort: 6942 - name: prometheus - protocol: TCP - {{ end }} - livenessProbe: - httpGet: - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - timeoutSeconds: 3 - hostNetwork: true - restartPolicy: Always - serviceAccount: cilium-operator - serviceAccountName: cilium-operator - {{if eq .Ipam "eni" }} - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 300 - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 300 - {{ end }} -{{ end }}`) + - name: flannel-cfg + 
configMap: + name: kube-flannel-cfg +`) -func cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplate, nil +func cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, nil } -func cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplateBytes() +func cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplateBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate = []byte(`# Pulled and modified from: https://raw.githubusercontent.com/coreos/flannel/v0.13.0/Documentation/kube-flannel.yml - ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy +var _cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml = []byte(`apiVersion: apps/v1 +kind: DaemonSet metadata: - name: psp.flannel.unprivileged - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default - seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default - apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default - apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default + name: kopeio-networking-agent + namespace: kube-system + labels: + k8s-addon: networking.kope.io + role.kubernetes.io/networking: "1" spec: - privileged: false - volumes: - - configMap - - secret - - emptyDir - - hostPath - allowedHostPaths: - - pathPrefix: "/dev/net" - - pathPrefix: "/etc/cni/net.d" - - pathPrefix: "/etc/kube-flannel" - - pathPrefix: "/run/flannel" - readOnlyRootFilesystem: false - # Users and groups - runAsUser: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - fsGroup: - rule: RunAsAny - # Privilege Escalation - allowPrivilegeEscalation: false - defaultAllowPrivilegeEscalation: false - # Capabilities - allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] - defaultAddCapabilities: [] - requiredDropCapabilities: [] - # Host namespaces - hostPID: false - hostIPC: false - hostNetwork: true - hostPorts: - - min: 0 - max: 65535 - # SELinux - seLinux: - # SELinux is unused in CaaSP - rule: 'RunAsAny' + selector: + matchLabels: + name: kopeio-networking-agent + role.kubernetes.io/networking: "1" + template: + metadata: + labels: + name: kopeio-networking-agent + role.kubernetes.io/networking: "1" + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + spec: + hostPID: true + hostIPC: true + hostNetwork: true + containers: + - resources: + requests: + cpu: 50m + memory: 100Mi + limits: + memory: 100Mi + securityContext: + privileged: true + image: kopeio/networking-agent:1.0.20181028 + name: networking-agent + volumeMounts: + - name: lib-modules + mountPath: /lib/modules + readOnly: true + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + serviceAccountName: 
kopeio-networking-agent + priorityClassName: system-node-critical + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + volumes: + - name: lib-modules + hostPath: + path: /lib/modules + --- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 + +apiVersion: v1 +kind: ServiceAccount metadata: - name: flannel + name: kopeio-networking-agent + namespace: kube-system labels: + k8s-addon: networking.kope.io role.kubernetes.io/networking: "1" + +--- + +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + k8s-addon: networking.kope.io + name: kopeio:networking-agent rules: -- apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: ['psp.flannel.unprivileged'] -- apiGroups: - - "" - resources: - - pods - verbs: - - get - apiGroups: - "" resources: - nodes verbs: + - get - list - watch + - patch - apiGroups: - "" resources: - nodes/status verbs: - patch + --- + +apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: flannel labels: - role.kubernetes.io/networking: "1" + k8s-addon: networking.kope.io + name: kopeio:networking-agent roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: flannel + name: kopeio:networking-agent subjects: -- kind: ServiceAccount - name: flannel - namespace: kube-system ---- +- apiGroup: rbac.authorization.k8s.io + kind: User + name: system:serviceaccount:kube-system:kopeio-networking-agent +`) + +func cloudupResourcesAddonsNetworkingKopeIoK8s112YamlBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, nil +} + +func cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingKopeIoK8s112YamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate = []byte(`# Pulled and modified from https://raw.githubusercontent.com/cloudnativelabs/kube-router/v1.0.1/daemonset/kubeadm-kuberouter.yaml + apiVersion: v1 -kind: ServiceAccount -metadata: - name: flannel - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- kind: ConfigMap -apiVersion: v1 metadata: - name: kube-flannel-cfg + name: kube-router-cfg namespace: kube-system labels: - k8s-app: flannel tier: node - app: flannel - role.kubernetes.io/networking: "1" + k8s-app: kube-router data: cni-conf.json: | { - "name": "cbr0", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true + "cniVersion":"0.3.0", + "name":"mynet", + "plugins":[ + { + "name":"kubernetes", + "type":"bridge", + "bridge":"kube-bridge", + "isDefaultGateway":true, + "ipam":{ + "type":"host-local" + } + }, + { + "type": "portmap", + "capabilities": { + "snat": true, + "portMappings": true + } } - } - ] - } - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "{{ FlannelBackendType }}" - } + ] } --- apiVersion: apps/v1 kind: DaemonSet metadata: - name: kube-flannel-ds - namespace: kube-system labels: - k8s-app: flannel + k8s-app: kube-router tier: node - app: flannel - 
role.kubernetes.io/networking: "1" + name: kube-router + namespace: kube-system spec: selector: matchLabels: + k8s-app: kube-router tier: node - app: flannel - role.kubernetes.io/networking: "1" template: metadata: labels: + k8s-app: kube-router tier: node - app: flannel - role.kubernetes.io/networking: "1" spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux - hostNetwork: true priorityClassName: system-node-critical - tolerations: - - operator: Exists - serviceAccountName: flannel - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.13.0 - command: - - cp + serviceAccountName: kube-router + containers: + - name: kube-router + image: docker.io/cloudnativelabs/kube-router:v1.0.1 args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.13.0 - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - - --iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }} + - --run-router=true + - --run-firewall=true + - --run-service-proxy=true + - --bgp-graceful-restart=true + - --kubeconfig=/var/lib/kube-router/kubeconfig + - --metrics-port=12013 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_ROUTER_CNI_CONF_FILE + value: /etc/cni/net.d/10-kuberouter.conflist + livenessProbe: + httpGet: + path: /healthz + port: 20244 + initialDelaySeconds: 10 + periodSeconds: 3 resources: - limits: - memory: 100Mi requests: cpu: 100m - memory: 100Mi + memory: 250Mi securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN", "NET_RAW"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + privileged: true volumeMounts: - - name: run - mountPath: /run/flannel - - name: dev-net - mountPath: /dev/net - - name: flannel-cfg - mountPath: /etc/kube-flannel/ + - name: lib-modules + mountPath: /lib/modules + readOnly: true + - name: cni-conf-dir + mountPath: /etc/cni/net.d + - name: kubeconfig + mountPath: /var/lib/kube-router/kubeconfig + readOnly: true + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + initContainers: + - name: install-cni + image: docker.io/cloudnativelabs/kube-router:v1.0.1 + command: + - /bin/sh + - -c + - set -e -x; + if [ ! 
-f /etc/cni/net.d/10-kuberouter.conflist ]; then + if [ -f /etc/cni/net.d/*.conf ]; then + rm -f /etc/cni/net.d/*.conf; + fi; + TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; + cp /etc/kube-router/cni-conf.json ${TMP}; + mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; + fi + volumeMounts: + - mountPath: /etc/cni/net.d + name: cni-conf-dir + - mountPath: /etc/kube-router + name: kube-router-cfg + hostNetwork: true + tolerations: + - operator: Exists volumes: - - name: run - hostPath: - path: /run/flannel - - name: dev-net + - name: lib-modules hostPath: - path: /dev/net - - name: cni + path: /lib/modules + - name: cni-conf-dir hostPath: path: /etc/cni/net.d - - name: flannel-cfg + - name: kube-router-cfg configMap: - name: kube-flannel-cfg -`) - -func cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplate = []byte(`kind: ClusterRole + name: kube-router-cfg + - name: kubeconfig + hostPath: + path: /var/lib/kube-router/kubeconfig + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-router + namespace: kube-system +--- +kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: flannel - labels: - role.kubernetes.io/networking: "1" + name: kube-router + namespace: kube-system rules: - apiGroups: - - "" + - "" resources: + - namespaces - pods + - services + - nodes + - endpoints verbs: + - list - get + - watch - apiGroups: - - "" + - "networking.k8s.io" resources: - - nodes + - networkpolicies verbs: - list + - get - watch - apiGroups: - - "" + - extensions resources: - - nodes/status + - networkpolicies verbs: - - patch + - get + - list + - watch --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: flannel - labels: - role.kubernetes.io/networking: "1" + name: kube-router roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: flannel + name: kube-router subjects: - kind: ServiceAccount - name: flannel - namespace: kube-system ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: flannel + name: kube-router namespace: kube-system - labels: - role.kubernetes.io/networking: "1" +- kind: User + name: system:kube-router +`) + +func cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate, nil +} + +func cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplateBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate = []byte(`# Pulled and modified from: 
https://docs.projectcalico.org/v3.9/manifests/calico-typha.yaml + --- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. kind: ConfigMap apiVersion: v1 metadata: - name: kube-flannel-cfg + name: calico-config namespace: kube-system labels: - k8s-app: flannel role.kubernetes.io/networking: "1" data: - cni-conf.json: | + # You must set a non-zero value for Typha replicas below. + typha_service_name: "{{- if .Networking.Calico.TyphaReplicas -}}calico-typha{{- else -}}none{{- end -}}" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use + {{- if .Networking.Calico.MTU }} + veth_mtu: "{{ .Networking.Calico.MTU }}" + {{- else }} + veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" + {{- end }} + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- { - "name": "cbr0", + "name": "k8s-pod-network", + "cniVersion": "0.3.1", "plugins": [ { - "type": "flannel", - "delegate": { - "forceAddress": true, - "isDefaultGateway": true, - "hairpinMode": true + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" } }, { "type": "portmap", - "capabilities": { - "portMappings": true - } + "snat": true, + "capabilities": {"portMappings": true} } ] } - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "{{ FlannelBackendType }}" - } - } + --- -kind: DaemonSet -apiVersion: extensions/v1beta1 +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: kube-flannel-ds - namespace: kube-system + name: felixconfigurations.crd.projectcalico.org labels: - k8s-app: flannel role.kubernetes.io/networking: "1" spec: - template: - metadata: - labels: - tier: node - app: flannel - role.kubernetes.io/networking: "1" - spec: - hostNetwork: true - nodeSelector: - beta.kubernetes.io/arch: amd64 - serviceAccountName: flannel - tolerations: - - operator: Exists - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.11.0-amd64 - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.11.0-amd64 - command: - - "/opt/bin/flanneld" - - "--ip-masq" - - "--kube-subnet-mgr" - - "--iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }}" - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg -`) - -func cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplateBytes() ([]byte, error) { - return 
_cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration +--- -var _cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml = []byte(`apiVersion: apps/v1 -kind: DaemonSet +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: kopeio-networking-agent - namespace: kube-system + name: ipamblocks.crd.projectcalico.org labels: - k8s-addon: networking.kope.io role.kubernetes.io/networking: "1" spec: - selector: - matchLabels: - name: kopeio-networking-agent - role.kubernetes.io/networking: "1" - template: - metadata: - labels: - name: kopeio-networking-agent - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - hostPID: true - hostIPC: true - hostNetwork: true - containers: - - resources: - requests: - cpu: 50m - memory: 100Mi - limits: - memory: 100Mi - securityContext: - privileged: true - image: kopeio/networking-agent:1.0.20181028 - name: networking-agent - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - serviceAccountName: kopeio-networking-agent - priorityClassName: system-node-critical - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock --- -apiVersion: v1 -kind: ServiceAccount +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: kopeio-networking-agent - namespace: kube-system + name: blockaffinities.crd.projectcalico.org labels: - k8s-addon: networking.kope.io role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity --- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: + name: ipamhandles.crd.projectcalico.org labels: - k8s-addon: networking.kope.io - name: kopeio:networking-agent -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - patch -- apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle --- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: + name: ipamconfigs.crd.projectcalico.org labels: - k8s-addon: networking.kope.io - name: kopeio:networking-agent -roleRef: - 
apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kopeio:networking-agent -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: system:serviceaccount:kube-system:kopeio-networking-agent -`) + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig -func cloudupResourcesAddonsNetworkingKopeIoK8s112YamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, nil -} +--- -func cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingKopeIoK8s112YamlBytes() - if err != nil { - return nil, err - } +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer - info := bindataFileInfo{name: "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} +--- -var _cloudupResourcesAddonsNetworkingKopeIoK8s16Yaml = []byte(`apiVersion: extensions/v1beta1 -kind: DaemonSet +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: kopeio-networking-agent - namespace: kube-system + name: bgpconfigurations.crd.projectcalico.org labels: - k8s-addon: networking.kope.io role.kubernetes.io/networking: "1" spec: - template: - metadata: - labels: - name: kopeio-networking-agent - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - hostPID: true - hostIPC: true - hostNetwork: true - containers: - - resources: - requests: - cpu: 50m - memory: 100Mi - limits: - memory: 100Mi - securityContext: - privileged: true - image: kopeio/networking-agent:1.0.20181028 - name: networking-agent - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - serviceAccountName: kopeio-networking-agent - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration --- -apiVersion: v1 -kind: ServiceAccount +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: kopeio-networking-agent - namespace: kube-system + name: ippools.crd.projectcalico.org labels: - k8s-addon: networking.kope.io role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool --- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: + name: hostendpoints.crd.projectcalico.org labels: - k8s-addon: networking.kope.io - name: kopeio:networking-agent -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - patch -- apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch 
+ role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint --- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: + name: clusterinformations.crd.projectcalico.org labels: - k8s-addon: networking.kope.io - name: kopeio:networking-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kopeio:networking-agent -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: system:serviceaccount:kube-system:kopeio-networking-agent -`) + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation -func cloudupResourcesAddonsNetworkingKopeIoK8s16YamlBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingKopeIoK8s16Yaml, nil -} +--- -func cloudupResourcesAddonsNetworkingKopeIoK8s16Yaml() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingKopeIoK8s16YamlBytes() - if err != nil { - return nil, err - } +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy - info := bindataFileInfo{name: "cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} +--- -var _cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate = []byte(`# Pulled and modified from https://raw.githubusercontent.com/cloudnativelabs/kube-router/v1.0.1/daemonset/kubeadm-kuberouter.yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset -apiVersion: v1 -kind: ConfigMap +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: kube-router-cfg - namespace: kube-system + name: networkpolicies.crd.projectcalico.org labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "cniVersion":"0.3.0", - "name":"mynet", - "plugins":[ - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "ipam":{ - "type":"host-local" - } - }, - { - "type": "portmap", - "capabilities": { - "snat": true, - "portMappings": true - } - } - ] - } + role.kubernetes.io/networking: "1" +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy + --- -apiVersion: apps/v1 -kind: DaemonSet + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: + name: networksets.crd.projectcalico.org labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system + role.kubernetes.io/networking: "1" spec: - selector: - matchLabels: - k8s-app: kube-router - tier: node - template: - metadata: - labels: - k8s-app: 
kube-router - tier: node - spec: - priorityClassName: system-node-critical - serviceAccountName: kube-router - containers: - - name: kube-router - image: docker.io/cloudnativelabs/kube-router:v1.0.1 - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy=true - - --bgp-graceful-restart=true - - --kubeconfig=/var/lib/kube-router/kubeconfig - - --metrics-port=12013 - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: KUBE_ROUTER_CNI_CONF_FILE - value: /etc/cni/net.d/10-kuberouter.conflist - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 100m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router/kubeconfig - readOnly: true - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - initContainers: - - name: install-cni - image: docker.io/cloudnativelabs/kube-router:v1.0.1 - command: - - /bin/sh - - -c - - set -e -x; - if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then - if [ -f /etc/cni/net.d/*.conf ]; then - rm -f /etc/cni/net.d/*.conf; - fi; - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist; - fi - volumeMounts: - - mountPath: /etc/cni/net.d - name: cni-conf-dir - - mountPath: /etc/kube-router - name: kube-router-cfg - hostNetwork: true - tolerations: - - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - - name: cni-conf-dir - hostPath: - path: /etc/cni/net.d - - name: kube-router-cfg - configMap: - name: kube-router-cfg - - name: kubeconfig - hostPath: - path: /var/lib/kube-router/kubeconfig - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset --- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kube-router - namespace: kube-system + name: calico-kube-controllers + labels: + role.kubernetes.io/networking: "1" rules: - - apiGroups: - - "" + # Nodes are watched to monitor for deletions. + - apiGroups: [""] resources: - - namespaces - - pods - - services - nodes - - endpoints verbs: + - watch - list - get - - watch - - apiGroups: - - "networking.k8s.io" + # Pods are queried to check for existence. + - apiGroups: [""] resources: - - networkpolicies + - pods verbs: - - list - get - - watch - - apiGroups: - - extensions + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] resources: - - networkpolicies + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles verbs: - get - list - - watch + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update --- kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kube-router + name: calico-kube-controllers + labels: + role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kube-router + name: calico-kube-controllers subjects: - kind: ServiceAccount - name: kube-router - namespace: kube-system -- kind: User - name: system:kube-router -`) - -func cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplate = []byte(`apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "ipam": { - "type":"host-local" - } - } ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - containers: - - name: kube-router - image: cloudnativelabs/kube-router:v0.3.1 - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy=true - - --metrics-port=12013 - - --kubeconfig=/var/lib/kube-router/kubeconfig - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 100m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router/kubeconfig - readOnly: true - initContainers: - - name: install-cni - image: busybox - command: - - /bin/sh - - -c - - set -e -x; - if [ ! 
-f /etc/cni/net.d/10-kuberouter.conf ]; then - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conf; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - serviceAccountName: kube-router - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /lib/modules - name: lib-modules - - hostPath: - path: /etc/cni/net.d - name: cni-conf-dir - - name: kubeconfig - hostPath: - path: /var/lib/kube-router/kubeconfig - - name: kube-router-cfg - configMap: - name: kube-router-cfg ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router + name: calico-kube-controllers namespace: kube-system --- -# Kube-router roles +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kube-router - namespace: kube-system + name: calico-node + labels: + role.kubernetes.io/networking: "1" rules: + # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] resources: - - namespaces - pods - - services - nodes - - endpoints + - namespaces verbs: - get - - list - - watch - - apiGroups: ["networking.k8s.io"] + - apiGroups: [""] resources: - - networkpolicies + - endpoints + - services verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: - list - watch - - apiGroups: ["extensions"] + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles verbs: - get - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get --- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: kube-router + name: calico-node + labels: + role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kube-router + name: calico-node subjects: - kind: ServiceAccount - name: kube-router + name: calico-node namespace: kube-system -- kind: User - name: system:kube-router -`) - -func cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.9/manifests/calico-typha.yaml +{{ if .Networking.Calico.TyphaReplicas -}} --- -# Source: calico/templates/calico-config.yaml -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. + apiVersion: v1 +kind: Service metadata: - name: calico-config + name: calico-typha namespace: kube-system labels: - role.kubernetes.io/networking: "1" -data: - # You must set a non-zero value for Typha replicas below. - typha_service_name: "{{- if .Networking.Calico.TyphaReplicas -}}calico-typha{{- else -}}none{{- end -}}" - # Configure the backend to use. - calico_backend: "bird" - - # Configure the MTU to use - {{- if .Networking.Calico.MTU }} - veth_mtu: "{{ .Networking.Calico.MTU }}" - {{- else }} - veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" - {{- end }} - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- -# Source: calico/templates/kdd-crds.yaml -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org - labels: + k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha --- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity - ---- +# This manifest creates a Deployment of Typha to back the above service. -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +apiVersion: apps/v1 +kind: Deployment metadata: - name: ipamhandles.crd.projectcalico.org + name: calico-typha + namespace: kube-system labels: + k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMHandle - plural: ipamhandles - singular: ipamhandle - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamconfigs.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMConfig - plural: ipamconfigs - singular: ipamconfig - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: 
CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ or .Networking.Calico.TyphaReplicas "0" }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" + annotations: + # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical + # add-on, ensuring it gets priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + beta.kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: calico-node + priorityClassName: system-cluster-critical + containers: + - image: calico/typha:v3.9.6 + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, + # this opens a port on the host, which may need to be secured. 
+ - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 --- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget metadata: - name: networkpolicies.crd.projectcalico.org + name: calico-typha + namespace: kube-system labels: + k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{{- end -}} --- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 metadata: - name: networksets.crd.projectcalico.org + name: calico-node + namespace: kube-system labels: - role.kubernetes.io/networking: "1" -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkSet - plural: networksets - singular: networkset ---- -# Source: calico/templates/rbac.yaml - -# Include a clusterrole for the kube-controllers component, -# and bind it to the calico-kube-controllers serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -rules: - # Nodes are watched to monitor for deletions. - - apiGroups: [""] - resources: - - nodes - verbs: - - watch - - list - - get - # Pods are queried to check for existence. - - apiGroups: [""] - resources: - - pods - verbs: - - get - # IPAM resources are manipulated when nodes are deleted. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - verbs: - - list - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - # Needs access to update clusterinformations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - clusterinformations - verbs: - - get - - create - - update ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system ---- -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. 
-kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only required for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system - -{{ if .Networking.Calico.TyphaReplicas -}} ---- -# Source: calico/templates/calico-typha.yaml -# This manifest creates a Service, which will be backed by Calico's Typha daemon. -# Typha sits in between Felix and the API server, reducing Calico's load on the API server. 
- -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the calico-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: {{ or .Networking.Calico.TyphaReplicas "0" }} - revisionHistoryLimit: 2 - selector: - matchLabels: - k8s-app: calico-typha - template: - metadata: - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" - annotations: - # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical - # add-on, ensuring it gets priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' - spec: - nodeSelector: - beta.kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - # Since Calico can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. - serviceAccountName: calico-node - priorityClassName: system-cluster-critical - containers: - - image: calico/typha:v3.9.6 - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, - # this opens a port on the host, which may need to be secured. 
- - name: TYPHA_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}" - - name: TYPHA_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 - ---- - -# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict - -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-typha -{{- end -}} ---- -# Source: calico/templates/calico-node.yaml -# This manifest installs the calico-node container, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node + k8s-app: calico-node role.kubernetes.io/networking: "1" spec: selector: @@ -12532,2537 +9937,167 @@ status: kind: "" plural: "" conditions: [] - storedVersions: [] - ---- - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: (devel) - name: networksets.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - group: crd.projectcalico.org - names: - kind: NetworkSet - listKind: NetworkSetList - plural: networksets - singular: networkset - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: NetworkSetSpec contains the specification for a NetworkSet - resource. - properties: - nets: - description: The list of IP networks that belong to this set. - items: - type: string - type: array - type: object - type: object - served: true - storage: true -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] - ---- ---- -# Source: calico/templates/calico-kube-controllers-rbac.yaml - -# Include a clusterrole for the kube-controllers component, -# and bind it to the calico-kube-controllers serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -rules: - # Nodes are watched to monitor for deletions. - - apiGroups: [""] - resources: - - nodes - verbs: - - watch - - list - - get - # Pods are queried to check for existence. 
- - apiGroups: [""] - resources: - - pods - verbs: - - get - # IPAM resources are manipulated when nodes are deleted. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - verbs: - - list - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - # kube-controllers manages hostendpoints. - - apiGroups: ["crd.projectcalico.org"] - resources: - - hostendpoints - verbs: - - get - - list - - create - - update - - delete - # Needs access to update clusterinformations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - clusterinformations - verbs: - - get - - create - - update - # KubeControllersConfiguration is where it gets its config - - apiGroups: ["crd.projectcalico.org"] - resources: - - kubecontrollersconfigurations - verbs: - # read its own config - - get - # create a default if none exists - - create - # update status - - update - # watch for changes - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system ---- - ---- -# Source: calico/templates/calico-node-rbac.yaml -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - # Pod CIDR auto-detection on kubeadm needs access to config maps. - - apiGroups: [""] - resources: - - configmaps - verbs: - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. 
- - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only required for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system - -{{ if .Networking.Calico.TyphaReplicas -}} ---- -# Source: calico/templates/calico-typha.yaml -# This manifest creates a Service, which will be backed by Calico's Typha daemon. -# Typha sits in between Felix and the API server, reducing Calico's load on the API server. - -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the calico-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: {{ or .Networking.Calico.TyphaReplicas "0" }} - revisionHistoryLimit: 2 - selector: - matchLabels: - k8s-app: calico-typha - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - # Since Calico can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. 
- serviceAccountName: calico-node - priorityClassName: system-cluster-critical - # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 - securityContext: - fsGroup: 65534 - containers: - - image: calico/typha:v3.16.4 - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. - - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - - name: TYPHA_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}" - - name: TYPHA_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 - ---- - -# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict - -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-typha -{{- end }} - ---- -# Source: calico/templates/calico-node.yaml -# This manifest installs the calico-node container, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Make sure calico-node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - priorityClassName: system-node-critical - initContainers: - # This container performs upgrade from host-local IPAM to calico-ipam. - # It can be deleted if this is a fresh installation, or if you have already - # upgraded to use calico-ipam. 
- - name: upgrade-ipam - image: calico/cni:v3.16.4 - command: ["/opt/cni/bin/calico-ipam", "-upgrade"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - volumeMounts: - - mountPath: /var/lib/cni/networks - name: host-local-net-dir - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - securityContext: - privileged: true - # This container installs the CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: calico/cni:v3.16.4 - command: ["/opt/cni/bin/install"] - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - securityContext: - privileged: true - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - - name: flexvol-driver - image: calico/pod2daemon-flexvol:v3.16.4 - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - securityContext: - privileged: true - containers: - # Runs calico-node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: calico/node:v3.16.4 - envFrom: - - configMapRef: - # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. - name: kubernetes-services-endpoint - optional: true - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - {{- if .Networking.Calico.TyphaReplicas }} - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - {{- end }} - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Auto-detect the BGP IP address. 
- - name: IP - value: "autodetect" - - name: IP_AUTODETECTION_METHOD - value: "{{- or .Networking.Calico.IPv4AutoDetectionMethod "first-found" }}" - - name: IP6_AUTODETECTION_METHOD - value: "{{- or .Networking.Calico.IPv6AutoDetectionMethod "first-found" }}" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}} {{- or .Networking.Calico.IPIPMode "Always" -}} {{- end -}}" - # Enable or Disable VXLAN on the default IP pool. - - name: CALICO_IPV4POOL_VXLAN - value: "Never" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set MTU for the VXLAN tunnel device. - - name: FELIX_VXLANMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Set MTU for the Wireguard tunnel device. - - name: FELIX_WIREGUARDMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within ` + "`" + `--cluster-cidr` + "`" + `. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - - name: FELIX_HEALTHENABLED - value: "true" - - # kops additions - # Enable source/destination checks for AWS - - name: FELIX_AWSSRCDSTCHECK - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}Disable{{- else -}} {{- or .Networking.Calico.AwsSrcDstCheck "DoNothing" -}} {{- end -}}" - # Enable eBPF dataplane mode - - name: FELIX_BPFENABLED - value: "{{ .Networking.Calico.BPFEnabled }}" - # Controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled - - name: FELIX_BPFEXTERNALSERVICEMODE - value: "{{- or .Networking.Calico.BPFExternalServiceMode "Tunnel" }}" - # Controls whether Felix will clean up the iptables rules created by the Kubernetes kube-proxy - - name: FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED - value: "{{- .Networking.Calico.BPFKubeProxyIptablesCleanupEnabled }}" - # Controls the log level used by the BPF programs - - name: FELIX_BPFLOGLEVEL - value: "{{- or .Networking.Calico.BPFLogLevel "Off" }}" - # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Calico.ChainInsertMode "insert" }}" - # Set Felix iptables binary variant, Legacy or NFT - - name: FELIX_IPTABLESBACKEND - value: "{{- or .Networking.Calico.IptablesBackend "Auto" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or 
.Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - # Enable WireGuard encryption for all on-the-wire pod-to-pod traffic - - name: FELIX_WIREGUARDENABLED - value: "{{ .Networking.Calico.WireguardEnabled }}" - securityContext: - privileged: true - resources: - requests: - cpu: {{ or .Networking.Calico.CPURequest "100m" }} - livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live - - -bird-live - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -felix-ready - - -bird-ready - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the - # parent directory. - - name: sysfs - mountPath: /sys/fs/ - # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. - # If the host is known to mount that filesystem already then Bidirectional can be omitted. - mountPropagation: Bidirectional - volumes: - # Used by calico-node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - - name: sysfs - hostPath: - path: /sys/fs/ - type: DirectoryOrCreate - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Mount in the directory for host-local IPAM allocations. This is - # used when upgrading from host-local to calico-ipam, and can be removed - # if not using the upgrade-ipam init container. - - name: host-local-net-dir - hostPath: - path: /var/lib/cni/networks - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" - ---- -# Source: calico/templates/calico-kube-controllers.yaml -# See https://github.com/projectcalico/kube-controllers -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" -spec: - # The controllers can only have a single active instance. 
- replicas: 1 - selector: - matchLabels: - k8s-app: calico-kube-controllers - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - spec: - nodeSelector: - kubernetes.io/os: linux - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - serviceAccountName: calico-kube-controllers - priorityClassName: system-cluster-critical - containers: - - name: calico-kube-controllers - image: calico/kube-controllers:v3.16.4 - env: - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: node - - name: DATASTORE_TYPE - value: kubernetes - readinessProbe: - exec: - command: - - /usr/bin/check-status - - -r - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} -# This manifest installs the "k8s-ec2-srcdst" container, which -# disables source/destination IP address checks for ENIs attached to -# EC2 instances hosting Kubernetes nodes. -# -# Disabling these checks allows Calico to send unencapsulated packets -# to and from pods within the same VPC subnet, where either a given -# packet's source address (originating from a pod) may not match the -# sending machine's address or the destination address (heading to a -# pod) may not match the receiving machine's address. -# -# This only applies for AWS environments. -# This is a deprecated setting, use awsSrcDstCheck instead ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: apps/v1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 0 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - priorityClassName: system-cluster-critical - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.3.0 - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" -{{ end -}} -`) - -func 
cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplate = []byte(`{{- $etcd_scheme := EtcdScheme }} -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The calico-etcd PetSet service IP:port - etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} - {{- range $j, $member := $cluster.Members -}} - {{- if $j }},{{ end -}} - {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001 - {{- end }}" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - {{- if eq $etcd_scheme "https" }} - "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem", - "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem", - "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem", - "etcd_scheme": "https", - {{- end }} - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - "" - - extensions - resources: - - pods - - namespaces - - networkpolicies - - nodes - verbs: - - watch - - list - - apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - watch - - list ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: 
calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: calico/node:v3.8.0 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within ` + "`" + `--cluster-cidr` + "`" + `. - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}}Always{{- end -}}" - # Disable IPv6 on Kubernetes. 
- - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to the desired level - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - # Auto-detect the BGP IP address. - - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 10m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -bird-ready - - -felix-ready - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: calico/cni:v3.8.0 - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. 
- - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - resources: - requests: - cpu: 10m - initContainers: - - name: migrate - image: calico/upgrade:v1.0.5 - command: ['/bin/sh', '-c', '/node-init-container.sh'] - env: - - name: CALICO_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - - name: CALICO_APIV1_DATASTORE_TYPE - value: "etcdv2" - - name: CALICO_APIV1_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: CALICO_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_ETCD_CA_CERT_FILE - value: /certs/ca.pem - - name: CALICO_APIV1_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_APIV1_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_APIV1_ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Necessary for gossip based DNS - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - ---- - -# This manifest deploys the Calico Kubernetes controllers. -# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' -spec: - # The controllers can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - spec: - # The controllers must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - serviceAccountName: calico-kube-controllers - containers: - - name: calico-kube-controllers - image: calico/kube-controllers:v3.8.0 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Choose which controllers to run. 
- - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - volumeMounts: - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - readinessProbe: - exec: - command: - - /usr/bin/check-status - - -r - initContainers: - - name: migrate - image: calico/upgrade:v1.0.5 - command: ['/bin/sh', '-c', '/controller-init.sh'] - env: - - name: CALICO_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - - name: CALICO_APIV1_DATASTORE_TYPE - value: "etcdv2" - - name: CALICO_APIV1_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: CALICO_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_ETCD_CA_CERT_FILE - value: /certs/ca.pem - - name: CALICO_APIV1_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_APIV1_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_APIV1_ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - # Necessary for gossip based DNS - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -# This manifest runs the Migration complete container that monitors for the -# completion of the calico-node Daemonset rollout and when it finishes -# successfully rolling out it will mark the migration complete and allow pods -# to be created again. ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-upgrade-job - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-upgrade-job - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - extensions - resources: - - daemonsets - - daemonsets/status - verbs: - - get - - list - - watch ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-upgrade-job - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-upgrade-job -subjects: -- kind: ServiceAccount - name: calico-upgrade-job - namespace: kube-system ---- -# If anything in this job is changed then the name of the job -# should be changed because Jobs cannot be updated, so changing -# the name would run a different Job if the previous version had been -# created before and it does not hurt to rerun this job. 
- -apiVersion: batch/v1 -kind: Job -metadata: - name: calico-complete-upgrade-v331 - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - role.kubernetes.io/networking: "1" - spec: - hostNetwork: true - serviceAccountName: calico-upgrade-job - restartPolicy: OnFailure - containers: - - name: migrate-completion - image: calico/upgrade:v1.0.5 - command: ['/bin/sh', '-c', '/completion-job.sh'] - env: - - name: EXPECTED_NODE_IMAGE - value: quay.io/calico/node:v3.7.4 - # The location of the Calico etcd cluster. - - name: CALICO_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - - name: CALICO_APIV1_DATASTORE_TYPE - value: "etcdv2" - - name: CALICO_APIV1_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: CALICO_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_ETCD_CA_CERT_FILE - value: /certs/ca.pem - - name: CALICO_APIV1_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_APIV1_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_APIV1_ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} -# This manifest installs the k8s-ec2-srcdst container, which disables -# src/dst ip checks to allow BGP to function for calico for hosts within subnets -# This only applies for AWS environments. 
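For context on the comment above: the k8s-ec2-srcdst controller watches Kubernetes nodes and clears the EC2 source/destination check on each node's instance, so that unencapsulated Calico traffic whose source or destination is a pod address is not dropped inside the VPC. A rough, self-contained sketch of the underlying EC2 call, using the AWS SDK for Go v1 — the region and instance ID are placeholders and this is not the controller's actual source:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// disableSrcDstCheck clears the EC2 source/destination check on a single
// instance. With the check enabled, EC2 drops packets whose source or
// destination address does not belong to the instance, which is exactly
// what unencapsulated pod-to-pod routing produces.
func disableSrcDstCheck(svc *ec2.EC2, instanceID string) error {
	_, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
		InstanceId:      aws.String(instanceID),
		SourceDestCheck: &ec2.AttributeBooleanValue{Value: aws.Bool(false)},
	})
	return err
}

func main() {
	// Region and instance ID below are illustrative placeholders only.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	if err := disableSrcDstCheck(ec2.New(sess), "i-0123456789abcdef0"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("source/destination check disabled")
}

The newer k8s-1.16 template earlier in this file routes the same behavior through Felix's FELIX_AWSSRCDSTCHECK setting, which is why that template marks the k8s-ec2-srcdst mechanism as deprecated in favor of awsSrcDstCheck.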
---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.1 - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" -{{- end -}} -`) - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplate = []byte(`{{- $etcd_scheme := EtcdScheme }} -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The calico-etcd PetSet service IP:port - etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} - {{- range $j, $member := $cluster.Members -}} - {{- if $j }},{{ end -}} - {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001 - {{- end }}" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - {{- if eq $etcd_scheme "https" }} - "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem", - "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem", - "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem", - "etcd_scheme": "https", - {{- end }} - "log_level": "info", - {{- if .Networking.Calico.MTU }} - "mtu": {{- or .Networking.Calico.MTU }}, - {{- end }} - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: calico - namespace: kube-system - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: calico - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.6.12 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - # Enable BGP. Disable to enforce policy only. 
- - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Auto-detect the BGP IP address. - - name: IP - value: "" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to the desired level - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" - {{- if .Networking.Calico.MTU }} - - name: FELIX_IPINIPMTU - value: "{{- or .Networking.Calico.MTU }}" - {{- end}} - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.11.8 - resources: - requests: - cpu: 10m - imagePullPolicy: Always - command: ["/install-cni.sh"] - env: - # The name of calico config file - - name: CNI_CONF_NAME - value: 10-calico.conflist - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - ---- - -# This manifest deploys the Calico Kubernetes controllers. -# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" -spec: - # The controllers can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # The controllers must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - serviceAccountName: calico - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - containers: - - name: calico-kube-controllers - image: quay.io/calico/kube-controllers:v1.0.5 - resources: - requests: - cpu: 10m - env: - # By default only policy, profile, workloadendpoint are turned - # on, node controller will decommission nodes that do not exist anymore - # this and CALICO_K8S_NODE_REF in calico-node fixes #3224, but invalid nodes that are - # already registered in calico needs to be deleted manually, see - # https://docs.projectcalico.org/v2.6/usage/decommissioning-a-node - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} ---- - -# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then -# be removed entirely once the new kube-controllers deployment has been deployed above. -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy -spec: - # Turn this deployment off in favor of the kube-controllers deployment above. - replicas: 0 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy - spec: - hostNetwork: true - serviceAccountName: calico - containers: - - name: calico-policy-controller - # This shouldn't get updated, since this is the last version we shipped that should be used. - image: quay.io/calico/kube-policy-controller:v0.7.0 - env: - # The location of the Calico etcd cluster. 
- - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{ end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} -# This manifest installs the k8s-ec2-srcdst container, which disables -# src/dst ip checks to allow BGP to function for calico for hosts within subnets -# This only applies for AWS environments. ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.2 - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" -{{- end -}} -`) - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate = []byte(`# Pulled and 
modified from: https://docs.projectcalico.org/v3.7/manifests/canal.yaml - ---- -# Source: calico/templates/calico-config.yaml -# This ConfigMap is used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # Typha is disabled. - typha_service_name: "none" - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # MTU default is 1500, can be overridden - veth_mtu: "{{- or .Networking.Canal.MTU "1500" }}" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "mtu": __CNI_MTU__, - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } - ---- - -# Source: calico/templates/kdd-crds.yaml -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint + storedVersions: [] --- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - --- - -apiVersion: apiextensions.k8s.io/v1beta1 +apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: globalnetworkpolicies.crd.projectcalico.org + annotations: + controller-gen.kubebuilder.io/version: (devel) + name: networksets.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" spec: - scope: Cluster group: crd.projectcalico.org - version: v1 names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: 
globalnetworkpolicy + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] --- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - --- +# Source: calico/templates/calico-kube-controllers-rbac.yaml -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - + name: calico-kube-controllers + labels: + role.kubernetes.io/networking: "1" +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are queried to check for existence. + - apiGroups: [""] + resources: + - pods + verbs: + - get + # IPAM resources are manipulated when nodes are deleted. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # kube-controllers manages hostendpoints. + - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - create + - update + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch --- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: networksets.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkSet - plural: networksets - singular: networkset - + name: calico-kube-controllers + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: +- kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system --- +--- +# Source: calico/templates/calico-node-rbac.yaml # Include a clusterrole for the calico-node DaemonSet, -# and bind it to the canal serviceaccount. +# and bind it to the calico-node serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: calico + name: calico-node + labels: + role.kubernetes.io/networking: "1" rules: # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] @@ -15082,6 +10117,12 @@ rules: - list # Used to discover Typhas. - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get - apiGroups: [""] resources: - nodes/status @@ -15128,6 +10169,7 @@ rules: - networksets - clusterinformations - hostendpoints + - blockaffinities verbs: - get - list @@ -15158,75 +10200,214 @@ rules: verbs: - create - update ---- -# Flannel ClusterRole -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/k8s-manifests/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: - - apiGroups: [""] + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] resources: - - pods + - blockaffinities + - ipamblocks + - ipamhandles verbs: - get - - apiGroups: [""] + - list + - create + - update + - delete + - apiGroups: ["crd.projectcalico.org"] resources: - - nodes + - ipamconfigs verbs: - - list - - watch - - apiGroups: [""] + - get + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] resources: - - nodes/status + - blockaffinities verbs: - - patch ---- -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get + --- -# Bind the Calico ClusterRole to the canal ServiceAccount. 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: canal-calico + name: calico-node + labels: + role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: calico + name: calico-node subjects: - kind: ServiceAccount - name: canal + name: calico-node namespace: kube-system +{{ if .Networking.Calico.TyphaReplicas -}} --- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + +--- + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the calico-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ or .Networking.Calico.TyphaReplicas "0" }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: calico-node + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: calico/typha:v3.16.4 + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. 
+ - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "{{- or .Networking.Calico.TyphaPrometheusMetricsEnabled "false" }}" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{- or .Networking.Calico.TyphaPrometheusMetricsPort "9093" }}" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + +--- + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{{- end }} + +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 metadata: - name: canal + name: calico-node namespace: kube-system labels: - k8s-app: canal + k8s-app: calico-node + role.kubernetes.io/networking: "1" spec: selector: matchLabels: - k8s-app: canal + k8s-app: calico-node updateStrategy: type: RollingUpdate rollingUpdate: @@ -15234,20 +10415,14 @@ spec: template: metadata: labels: - k8s-app: canal - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' + k8s-app: calico-node + role.kubernetes.io/networking: "1" spec: - priorityClassName: system-node-critical nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux hostNetwork: true tolerations: - # Make sure canal gets scheduled on all nodes. + # Make sure calico-node gets scheduled on all nodes. - effect: NoSchedule operator: Exists # Mark the pod as a critical add-on for rescheduling. @@ -15255,37 +10430,71 @@ spec: operator: Exists - effect: NoExecute operator: Exists - serviceAccountName: canal + serviceAccountName: calico-node # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical initContainers: - # This container installs the Calico CNI binaries + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: calico/cni:v3.16.4 + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. 
+ name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries # and CNI network config file on each node. - name: install-cni - image: calico/cni:v3.7.5 - command: ["/install-cni.sh"] + image: calico/cni:v3.16.4 + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true env: # Name of the CNI config file to create. - name: CNI_CONF_NAME - value: "10-canal.conflist" - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: canal-config - key: veth_mtu + value: "10-calico.conflist" # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: configMapKeyRef: - name: canal-config + name: calico-config key: cni_network_config # Set the hostname based on the k8s node name. - name: KUBERNETES_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu # Prevents the container from sleeping forever. - name: SLEEP value: "false" @@ -15294,19 +10503,40 @@ spec: name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: calico/pod2daemon-flexvol:v3.16.4 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true containers: - # Runs calico/node container on each Kubernetes node. This + # Runs calico-node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node - image: calico/node:v3.7.5 + image: calico/node:v3.16.4 + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" - # Configure route aggregation based on pod CIDR. - - name: USE_POD_CIDR - value: "true" + {{- if .Networking.Calico.TyphaReplicas }} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: calico-config + key: typha_service_name + {{- end }} # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" @@ -15315,70 +10545,123 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - # Don't enable BGP. + # Choose the backend to use. - name: CALICO_NETWORKING_BACKEND - value: "none" + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend # Cluster type to identify the deployment type - name: CLUSTER_TYPE - value: "k8s,canal" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # No IP address needed. + value: "kops,bgp" + # Auto-detect the BGP IP address. 
- name: IP - value: "" - # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" + value: "autodetect" + - name: IP_AUTODETECTION_METHOD + value: "{{- or .Networking.Calico.IPv4AutoDetectionMethod "first-found" }}" + - name: IP6_AUTODETECTION_METHOD + value: "{{- or .Networking.Calico.IPv6AutoDetectionMethod "first-found" }}" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}} {{- or .Networking.Calico.IPIPMode "Always" -}} {{- end -}}" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" + # Set MTU for tunnel device used if ipip is enabled - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: - name: canal-config + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within ` + "`" + `--cluster-cidr` + "`" + `. + - name: CALICO_IPV4POOL_CIDR + value: "{{ .KubeControllerManager.ClusterCIDR }}" + # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" - # Set Felix logging to "INFO" + # Set Felix logging to "info" - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - # Set Felix endpoint to host default action to ACCEPT. 
- - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" + value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" + - name: FELIX_HEALTHENABLED + value: "true" + + # kops additions + # Enable source/destination checks for AWS + - name: FELIX_AWSSRCDSTCHECK + value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}Disable{{- else -}} {{- or .Networking.Calico.AwsSrcDstCheck "DoNothing" -}} {{- end -}}" + # Enable eBPF dataplane mode + - name: FELIX_BPFENABLED + value: "{{ .Networking.Calico.BPFEnabled }}" + # Controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled + - name: FELIX_BPFEXTERNALSERVICEMODE + value: "{{- or .Networking.Calico.BPFExternalServiceMode "Tunnel" }}" + # Controls whether Felix will clean up the iptables rules created by the Kubernetes kube-proxy + - name: FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED + value: "{{- .Networking.Calico.BPFKubeProxyIptablesCleanupEnabled }}" + # Controls the log level used by the BPF programs + - name: FELIX_BPFLOGLEVEL + value: "{{- or .Networking.Calico.BPFLogLevel "Off" }}" # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" + value: "{{- or .Networking.Calico.ChainInsertMode "insert" }}" + # Set Felix iptables binary variant, Legacy or NFT + - name: FELIX_IPTABLESBACKEND + value: "{{- or .Networking.Calico.IptablesBackend "Auto" }}" # Set to enable the experimental Prometheus metrics server - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" + value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" # TCP port that the Prometheus metrics server should bind to - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" + value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" # Enable Prometheus Go runtime metrics collection - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" + value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" # Enable Prometheus process metrics collection - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" + value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" + # Enable WireGuard encryption for all on-the-wire pod-to-pod traffic + - name: FELIX_WIREGUARDENABLED + value: "{{ .Networking.Calico.WireguardEnabled }}" securityContext: privileged: true resources: requests: - cpu: {{ or .Networking.Canal.CPURequest "100m" }} + cpu: {{ or .Networking.Calico.CPURequest "100m" }} livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 readinessProbe: - httpGet: - path: /readiness - port: 9099 - host: localhost + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready periodSeconds: 10 volumeMounts: - mountPath: /lib/modules @@ -15393,44 +10676,17 @@ spec: - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. 
- - name: kube-flannel - image: quay.io/coreos/flannel:v0.11.0 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - {{- if eq .Networking.Canal.DisableFlannelForwardRules true }} - - name: FLANNELD_IPTABLES_FORWARD_RULES - value: "false" - {{- end }} - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: flannel-cfg - mountPath: /etc/kube-flannel/ + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: sysfs + mountPath: /sys/fs/ + # Bidirectional means that, if we mount the BPF filesystem at /sys/fs/bpf it will propagate to the host. + # If the host is known to mount that filesystem already then Bidirectional can be omitted. + mountPropagation: Bidirectional volumes: - # Used by calico/node. + # Used by calico-node. - name: lib-modules hostPath: path: /lib/modules @@ -15444,10 +10700,10 @@ spec: hostPath: path: /run/xtables.lock type: FileOrCreate - # Used by flannel. - - name: flannel-cfg - configMap: - name: canal-config + - name: sysfs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate # Used to install CNI. - name: cni-bin-dir hostPath: @@ -15455,31 +10711,222 @@ spec: - name: cni-net-dir hostPath: path: /etc/cni/net.d + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" + +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + role.kubernetes.io/networking: "1" + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. 
+ - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: calico/kube-controllers:v3.16.4 + env: + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + --- apiVersion: v1 kind: ServiceAccount metadata: - name: canal + name: calico-kube-controllers + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" + +{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} +# This manifest installs the "k8s-ec2-srcdst" container, which +# disables source/destination IP address checks for ENIs attached to +# EC2 instances hosting Kubernetes nodes. +# +# Disabling these checks allows Calico to send unencapsulated packets +# to and from pods within the same VPC subnet, where either a given +# packet's source address (originating from a pod) may not match the +# sending machine's address or the destination address (heading to a +# pod) may not match the receiving machine's address. +# +# This only applies for AWS environments. +# This is a deprecated setting, use awsSrcDstCheck instead +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: k8s-ec2-srcdst + labels: + role.kubernetes.io/networking: "1" +rules: +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch + - update + - patch + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: k8s-ec2-srcdst + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: k8s-ec2-srcdst + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k8s-ec2-srcdst +subjects: +- kind: ServiceAccount + name: k8s-ec2-srcdst + namespace: kube-system + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: k8s-ec2-srcdst namespace: kube-system + labels: + k8s-app: k8s-ec2-srcdst + role.kubernetes.io/networking: "1" +spec: + replicas: 0 + selector: + matchLabels: + k8s-app: k8s-ec2-srcdst + template: + metadata: + labels: + k8s-app: k8s-ec2-srcdst + role.kubernetes.io/networking: "1" + spec: + hostNetwork: true + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + serviceAccountName: k8s-ec2-srcdst + priorityClassName: system-cluster-critical + containers: + - image: ottoyiu/k8s-ec2-srcdst:v0.3.0 + name: k8s-ec2-srcdst + resources: + requests: + cpu: 10m + memory: 64Mi + env: + - name: AWS_REGION + value: {{ Region }} + volumeMounts: + - name: ssl-certs + mountPath: "/etc/ssl/certs/ca-certificates.crt" + readOnly: true + imagePullPolicy: "Always" + volumes: + - name: ssl-certs + hostPath: + path: "/etc/ssl/certs/ca-certificates.crt" + nodeSelector: + node-role.kubernetes.io/master: "" +{{ end -}} `) -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate, nil +func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, nil } -func 
cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplateBytes() +func cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplateBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.12/manifests/canal.yaml +var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.7/manifests/canal.yaml --- # Source: calico/templates/calico-config.yaml @@ -15489,11 +10936,9 @@ apiVersion: v1 metadata: name: canal-config namespace: kube-system - labels: - role.kubernetes.io/networking: "1" data: # Typha is disabled. - typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}" + typha_service_name: "none" # The interface used by canal for host <-> host communication. # If left blank, then the interface is chosen using the node's # default route. @@ -15503,26 +10948,22 @@ data: # the pod network. masquerade: "true" - # Configure the MTU to use - {{- if .Networking.Canal.MTU }} - veth_mtu: "{{ .Networking.Canal.MTU }}" - {{- else }} - veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" - {{- end }} + # MTU default is 1500, can be overridden + veth_mtu: "{{- or .Networking.Canal.MTU "1500" }}" # The CNI network configuration to install on each node. The special # values in this config will be automatically populated. 
cni_network_config: |- { "name": "k8s-pod-network", - "cniVersion": "0.3.1", + "cniVersion": "0.3.0", "plugins": [ { "type": "calico", "log_level": "info", "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", "mtu": __CNI_MTU__, + "nodename": "__KUBERNETES_NODE_NAME__", "ipam": { "type": "host-local", "subnet": "usePodCidr" @@ -15538,10 +10979,6 @@ data: "type": "portmap", "snat": true, "capabilities": {"portMappings": true} - }, - { - "type": "bandwidth", - "capabilities": {"bandwidth": true} } ] } @@ -15555,115 +10992,27 @@ data: } } ---- -# Source: calico/templates/kdd-crds.yaml -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamhandles.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMHandle - plural: ipamhandles - singular: ipamhandle - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamconfigs.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMConfig - plural: ipamconfigs - singular: ipamconfig - --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: bgppeers.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" + name: felixconfigurations.crd.projectcalico.org spec: scope: Cluster group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration --- apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -15679,8 +11028,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: ippools.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -15696,8 +11043,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: hostendpoints.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -15713,8 +11058,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 
kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -15730,8 +11073,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworkpolicies.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -15747,8 +11088,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: globalnetworksets.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -15764,8 +11103,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networkpolicies.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Namespaced group: crd.projectcalico.org @@ -15781,8 +11118,6 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: networksets.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" spec: scope: Namespaced group: crd.projectcalico.org @@ -15791,17 +11126,15 @@ spec: kind: NetworkSet plural: networksets singular: networkset + --- -# Source: calico/templates/rbac.yaml # Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. +# and bind it to the canal serviceaccount. kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: calico - labels: - role.kubernetes.io/networking: "1" rules: # The CNI plugin needs to get pods, nodes, and namespaces. - apiGroups: [""] @@ -15867,7 +11200,6 @@ rules: - networksets - clusterinformations - hostendpoints - - blockaffinities verbs: - get - list @@ -15900,13 +11232,11 @@ rules: - update --- # Flannel ClusterRole -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/k8s-manifests/kube-flannel-rbac.yml kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: flannel - labels: - role.kubernetes.io/networking: "1" rules: - apiGroups: [""] resources: @@ -15930,8 +11260,6 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: canal-flannel - labels: - role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -15941,12 +11269,11 @@ subjects: name: canal namespace: kube-system --- +# Bind the Calico ClusterRole to the canal ServiceAccount. apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: canal-calico - labels: - role.kubernetes.io/networking: "1" roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -15956,150 +11283,10 @@ subjects: name: canal namespace: kube-system -{{ if .Networking.Canal.TyphaReplicas -}} ---- -# Source: calico/templates/calico-typha.yaml -# This manifest creates a Service, which will be backed by Calico's Typha daemon. -# Typha sits in between Felix and the API server, reducing Calico's load on the API server. - -apiVersion: v1 -kind: Service -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha - ---- - -# This manifest creates a Deployment of Typha to back the above service. 
- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the canal-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: {{ or .Networking.Canal.TyphaReplicas 0 }} - revisionHistoryLimit: 2 - selector: - matchLabels: - k8s-app: calico-typha - template: - metadata: - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" - annotations: - # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical - # add-on, ensuring it gets priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' - spec: - nodeSelector: - kubernetes.io/os: linux - kubernetes.io/role: master - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: "node-role.kubernetes.io/master" - effect: NoSchedule - # Since Calico can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. - serviceAccountName: canal - priorityClassName: system-cluster-critical - # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 - securityContext: - fsGroup: 65534 - containers: - - image: calico/typha:v3.12.2 - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. 
- - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - - name: TYPHA_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}" - - name: TYPHA_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 - --- -# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict - -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: calico-typha - namespace: kube-system - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" -spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-typha -{{- end }} - ---- -# Source: calico/templates/calico-node.yaml -# This manifest installs the canal container, as well -# as the CNI plugins and network config on +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 @@ -16108,7 +11295,6 @@ metadata: namespace: kube-system labels: k8s-app: canal - role.kubernetes.io/networking: "1" spec: selector: matchLabels: @@ -16121,7 +11307,6 @@ spec: metadata: labels: k8s-app: canal - role.kubernetes.io/networking: "1" annotations: # This, along with the CriticalAddonsOnly toleration below, # marks the pod as a critical add-on, ensuring it gets @@ -16129,8 +11314,9 @@ spec: # if it ever gets evicted. scheduler.alpha.kubernetes.io/critical-pod: '' spec: + priorityClassName: system-node-critical nodeSelector: - kubernetes.io/os: linux + beta.kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure canal gets scheduled on all nodes. @@ -16145,17 +11331,22 @@ spec: # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 - priorityClassName: system-node-critical initContainers: - # This container installs the CNI binaries + # This container installs the Calico CNI binaries # and CNI network config file on each node. - name: install-cni - image: calico/cni:v3.12.2 + image: calico/cni:v3.7.5 command: ["/install-cni.sh"] env: # Name of the CNI config file to create. - name: CNI_CONF_NAME value: "10-canal.conflist" + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: canal-config + key: veth_mtu # The CNI network config to install on each node. - name: CNI_NETWORK_CONFIG valueFrom: @@ -16167,12 +11358,6 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: canal-config - key: veth_mtu # Prevents the container from sleeping forever. - name: SLEEP value: "false" @@ -16181,23 +11366,12 @@ spec: name: cni-bin-dir - mountPath: /host/etc/cni/net.d name: cni-net-dir - securityContext: - privileged: true - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. 
- - name: flexvol-driver - image: calico/pod2daemon-flexvol:v3.12.2 - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - securityContext: - privileged: true containers: - # Runs canal container on each Kubernetes node. This + # Runs calico/node container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node - image: calico/node:v3.12.2 + image: calico/node:v3.7.5 env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE @@ -16205,14 +11379,6 @@ spec: # Configure route aggregation based on pod CIDR. - name: USE_POD_CIDR value: "true" - {{- if .Networking.Canal.TyphaReplicas }} - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: canal-config - key: typha_service_name - {{- end }} # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" @@ -16226,7 +11392,6 @@ spec: value: "none" # Cluster type to identify the deployment type - name: CLUSTER_TYPE - # was value: "k8s,bgp" value: "k8s,canal" # Period, in seconds, at which felix re-applies all iptables state - name: FELIX_IPTABLESREFRESHINTERVAL @@ -16234,34 +11399,26 @@ spec: # No IP address needed. - name: IP value: "" - # Set MTU for tunnel device used if ipip is enabled + # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" - name: FELIX_IPINIPMTU valueFrom: configMapKeyRef: name: canal-config key: veth_mtu - # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" # Set Felix logging to "INFO" - name: FELIX_LOGSEVERITYSCREEN value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - - name: FELIX_HEALTHENABLED - value: "true" - - # kops additions + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - name: FELIX_CHAININSERTMODE value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" - # Set Felix iptables binary variant, Legacy or NFT - - name: FELIX_IPTABLESBACKEND - value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}" # Set to enable the experimental Prometheus metrics server - name: FELIX_PROMETHEUSMETRICSENABLED value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" @@ -16274,16 +11431,18 @@ spec: # Enable Prometheus process metrics collection - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" + - name: FELIX_HEALTHENABLED + value: "true" securityContext: privileged: true resources: requests: cpu: {{ or .Networking.Canal.CPURequest "100m" }} livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live + httpGet: + path: /liveness + port: 9099 + host: localhost periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 @@ -16306,8 +11465,6 @@ spec: - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - - name: policysync - mountPath: /var/run/nodeagent # This container runs flannel using the kube-subnet-mgr backend # for allocating subnets. 
- name: kube-flannel @@ -16345,7 +11502,7 @@ spec: - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - # Used by canal. + # Used by calico/node. - name: lib-modules hostPath: path: /lib/modules @@ -16361,1036 +11518,660 @@ spec: type: FileOrCreate # Used by flannel. - name: flannel-cfg - configMap: - name: canal-config - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" -`) - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.13/manifests/canal.yaml - ---- -# Source: calico/templates/calico-config.yaml -# This ConfigMap is used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" -data: - # Typha is disabled. - typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}" - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # Configure the MTU to use - {{- if .Networking.Canal.MTU }} - veth_mtu: "{{ .Networking.Canal.MTU }}" - {{- else }} - veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" - {{- end }} - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - }, - { - "type": "bandwidth", - "capabilities": {"bandwidth": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. 
- net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } - ---- -# Source: calico/templates/kdd-crds.yaml - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - + configMap: + name: canal-config + # Used to install CNI. 
+ - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d --- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +apiVersion: v1 +kind: ServiceAccount metadata: - name: ipamconfigs.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMConfig - plural: ipamconfigs - singular: ipamconfig + name: canal + namespace: kube-system +`) ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamhandles.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMHandle - plural: ipamhandles - singular: ipamhandle +func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate, nil +} ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool +func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplateBytes() + if err != nil { + return nil, err + } ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org - labels: - role.kubernetes.io/networking: "1" -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy + info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.12/manifests/canal.yaml --- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Canal installation. +kind: ConfigMap +apiVersion: v1 metadata: - name: networksets.crd.projectcalico.org + name: canal-config + namespace: kube-system labels: role.kubernetes.io/networking: "1" -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkSet - plural: networksets - singular: networkset +data: + # Typha is disabled. + typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}" + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. 
+ canal_iface: "" ---- -# Source: calico/templates/rbac.yaml + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "true" -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - # Pod CIDR auto-detection on kubeadm needs access to config maps. - - apiGroups: [""] - resources: - - configmaps - verbs: - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - - blockaffinities - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only required for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update + # Configure the MTU to use + {{- if .Networking.Canal.MTU }} + veth_mtu: "{{ .Networking.Canal.MTU }}" + {{- else }} + veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" + {{- end }} + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + + # Flannel network configuration. Mounted into the flannel container. 
+ net-conf.json: | + { + "Network": "{{ .NonMasqueradeCIDR }}", + "Backend": { + "Type": "vxlan" + } + } --- -# Flannel ClusterRole -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: flannel + name: felixconfigurations.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" -rules: - - apiGroups: [""] - resources: - - pods - verbs: - - get - - apiGroups: [""] - resources: - - nodes - verbs: - - list - - watch - - apiGroups: [""] - resources: - - nodes/status - verbs: - - patch +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration --- -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: canal-flannel + name: ipamblocks.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock + --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: canal-calico + name: blockaffinities.crd.projectcalico.org labels: role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity -{{ if .Networking.Canal.TyphaReplicas -}} --- -# Source: calico/templates/calico-typha.yaml -# This manifest creates a Service, which will be backed by Calico's Typha daemon. -# Typha sits in between Felix and the API server, reducing Calico's load on the API server. -apiVersion: v1 -kind: Service +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: calico-typha - namespace: kube-system + name: ipamhandles.crd.projectcalico.org labels: - k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: - ports: - - port: 5473 - protocol: TCP - targetPort: calico-typha - name: calico-typha - selector: - k8s-app: calico-typha + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle --- -# This manifest creates a Deployment of Typha to back the above service. 
+apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig -apiVersion: apps/v1 -kind: Deployment +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: calico-typha - namespace: kube-system + name: bgppeers.crd.projectcalico.org labels: - k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: - # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the - # typha_service_name variable in the canal-config ConfigMap above. - # - # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential - # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In - # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. - replicas: {{ or .Networking.Canal.TyphaReplicas 0 }} - revisionHistoryLimit: 2 - selector: - matchLabels: - k8s-app: calico-typha - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-typha - role.kubernetes.io/networking: "1" - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - # Since Calico can't network a pod until Typha is up, we need to run Typha itself - # as a host-networked pod. - serviceAccountName: canal - priorityClassName: system-cluster-critical - # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 - securityContext: - fsGroup: 65534 - containers: - - image: calico/typha:v3.13.4 - name: calico-typha - ports: - - containerPort: 5473 - name: calico-typha - protocol: TCP - env: - # Enable "info" logging by default. Can be set to "debug" to increase verbosity. - - name: TYPHA_LOGSEVERITYSCREEN - value: "info" - # Disable logging to file and syslog since those don't make sense in Kubernetes. - - name: TYPHA_LOGFILEPATH - value: "none" - - name: TYPHA_LOGSEVERITYSYS - value: "none" - # Monitor the Kubernetes API to find the number of running instances and rebalance - # connections. 
- - name: TYPHA_CONNECTIONREBALANCINGMODE - value: "kubernetes" - - name: TYPHA_DATASTORETYPE - value: "kubernetes" - - name: TYPHA_HEALTHENABLED - value: "true" - - name: TYPHA_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}" - - name: TYPHA_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}" - livenessProbe: - httpGet: - path: /liveness - port: 9098 - host: localhost - periodSeconds: 30 - initialDelaySeconds: 30 - securityContext: - runAsNonRoot: true - allowPrivilegeEscalation: false - readinessProbe: - httpGet: - path: /readiness - port: 9098 - host: localhost - periodSeconds: 10 + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPPeer + plural: bgppeers + singular: bgppeer --- -# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: BGPConfiguration + plural: bgpconfigurations + singular: bgpconfiguration -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: calico-typha - namespace: kube-system + name: ippools.crd.projectcalico.org labels: - k8s-app: calico-typha role.kubernetes.io/networking: "1" spec: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: calico-typha -{{- end }} + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool --- -# Source: calico/templates/calico-node.yaml -# This manifest installs the canal container, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: canal - namespace: kube-system + name: hostendpoints.crd.projectcalico.org labels: - k8s-app: canal role.kubernetes.io/networking: "1" spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - role.kubernetes.io/networking: "1" - spec: - nodeSelector: - kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Make sure canal gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: canal - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - priorityClassName: system-node-critical - initContainers: - # This container installs the CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: calico/cni:v3.13.4 - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # The CNI network config to install on each node. 
- - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: canal-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - securityContext: - privileged: true - # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes - # to communicate with Felix over the Policy Sync API. - - name: flexvol-driver - image: calico/pod2daemon-flexvol:v3.13.4 - volumeMounts: - - name: flexvol-driver-host - mountPath: /host/driver - securityContext: - privileged: true - containers: - # Runs canal container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: calico/node:v3.13.4 - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Configure route aggregation based on pod CIDR. - - name: USE_POD_CIDR - value: "true" - {{- if .Networking.Canal.TyphaReplicas }} - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: canal-config - key: typha_service_name - {{- end }} - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # No IP address needed. - - name: IP - value: "" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: canal-config - key: veth_mtu - # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" - # Disable IPv6 on Kubernetes. 
- - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Canal.LogSeveritySys "info" }}" - - name: FELIX_HEALTHENABLED - value: "true" + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: ClusterInformation + plural: clusterinformations + singular: clusterinformation + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + +--- + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy - # kops additions - # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" - # Set Felix iptables binary variant, Legacy or NFT - - name: FELIX_IPTABLESBACKEND - value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - securityContext: - privileged: true - resources: - requests: - cpu: {{ or .Networking.Canal.CPURequest "100m" }} - livenessProbe: - exec: - command: - - /bin/calico-node - - -felix-live - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - host: localhost - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - - name: policysync - mountPath: /var/run/nodeagent - # This container runs flannel 
using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: quay.io/coreos/flannel:v0.11.0 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - {{- if eq .Networking.Canal.DisableFlannelForwardRules true }} - - name: FLANNELD_IPTABLES_FORWARD_RULES - value: "false" - {{- end }} - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by canal. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - # Used by flannel. - - name: flannel-cfg - configMap: - name: canal-config - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used to create per-pod Unix Domain Sockets - - name: policysync - hostPath: - type: DirectoryOrCreate - path: /var/run/nodeagent - # Used to install Flex Volume Driver - - name: flexvol-driver-host - hostPath: - type: DirectoryOrCreate - path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" --- - -apiVersion: v1 -kind: ServiceAccount + +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset +--- +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico + labels: + role.kubernetes.io/networking: "1" +rules: + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update +--- +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel + labels: + role.kubernetes.io/networking: "1" +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] + resources: + - nodes/status + verbs: + - patch +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 metadata: + name: canal-flannel + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount name: canal namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: canal-calico labels: role.kubernetes.io/networking: "1" -`) - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system -var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplate = []byte(`# Canal Version v3.2.3 -# https://docs.projectcalico.org/v3.2/releases#v3.2.3 -# This manifest includes the following component versions: -# calico/node:v3.2.3 -# calico/cni:v3.2.3 -# coreos/flannel:v0.9.0 +{{ if .Networking.Canal.TyphaReplicas -}} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. -# This ConfigMap is used to configure a self-hosted Canal installation. 
-kind: ConfigMap apiVersion: v1 +kind: Service metadata: - name: canal-config + name: calico-typha namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" +--- - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } +# This manifest creates a Deployment of Typha to back the above service. - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the canal-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ or .Networking.Canal.TyphaReplicas 0 }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + template: + metadata: + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" + annotations: + # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical + # add-on, ensuring it gets priority scheduling and that its resources are reserved + # if it ever gets evicted. + scheduler.alpha.kubernetes.io/critical-pod: '' + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + kubernetes.io/os: linux + kubernetes.io/role: master + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: "node-role.kubernetes.io/master" + effect: NoSchedule + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: canal + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: calico/typha:v3.12.2 + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. 
+ - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. + - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 --- +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: calico-typha + namespace: kube-system + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{{- end }} -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on # each master and worker node in a Kubernetes cluster. kind: DaemonSet apiVersion: apps/v1 @@ -17399,6 +12180,7 @@ metadata: namespace: kube-system labels: k8s-app: canal + role.kubernetes.io/networking: "1" spec: selector: matchLabels: @@ -17411,6 +12193,7 @@ spec: metadata: labels: k8s-app: canal + role.kubernetes.io/networking: "1" annotations: # This, along with the CriticalAddonsOnly toleration below, # marks the pod as a critical add-on, ensuring it gets @@ -17419,7 +12202,7 @@ spec: scheduler.alpha.kubernetes.io/critical-pod: '' spec: nodeSelector: - beta.kubernetes.io/os: linux + kubernetes.io/os: linux hostNetwork: true tolerations: # Make sure canal gets scheduled on all nodes. @@ -17434,16 +12217,74 @@ spec: # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: calico/cni:v3.12.2 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: canal-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: canal-config + key: veth_mtu + # Prevents the container from sleeping forever. 
+ - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: calico/pod2daemon-flexvol:v3.12.2 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true containers: - # Runs calico/node container on each Kubernetes node. This + # Runs canal container on each Kubernetes node. This # container programs network policy and routes on each # host. - name: calico-node - image: quay.io/calico/node:v3.2.3 + image: calico/node:v3.12.2 env: # Use Kubernetes API as the backing datastore. - name: DATASTORE_TYPE value: "kubernetes" + # Configure route aggregation based on pod CIDR. + - name: USE_POD_CIDR + value: "true" + {{- if .Networking.Canal.TyphaReplicas }} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: canal-config + key: typha_service_name + {{- end }} # Wait for the datastore. - name: WAIT_FOR_DATASTORE value: "true" @@ -17457,6 +12298,7 @@ spec: value: "none" # Cluster type to identify the deployment type - name: CLUSTER_TYPE + # was value: "k8s,bgp" value: "k8s,canal" # Period, in seconds, at which felix re-applies all iptables state - name: FELIX_IPTABLESREFRESHINTERVAL @@ -17464,21 +12306,34 @@ spec: # No IP address needed. - name: IP value: "" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: canal-config + key: veth_mtu # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. - name: CALICO_DISABLE_FILE_LOGGING value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" # Disable IPv6 on Kubernetes. - name: FELIX_IPV6SUPPORT value: "false" - # Set Felix logging to "info" + # Set Felix logging to "INFO" - name: FELIX_LOGSEVERITYSCREEN value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - # Set Felix endpoint to host default action to ACCEPT. 
- - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" + - name: FELIX_HEALTHENABLED + value: "true" + + # kops additions # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - name: FELIX_CHAININSERTMODE value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" + # Set Felix iptables binary variant, Legacy or NFT + - name: FELIX_IPTABLESBACKEND + value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}" # Set to enable the experimental Prometheus metrics server - name: FELIX_PROMETHEUSMETRICSENABLED value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" @@ -17491,18 +12346,16 @@ spec: # Enable Prometheus process metrics collection - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" securityContext: privileged: true resources: requests: - cpu: 250m + cpu: {{ or .Networking.Canal.CPURequest "100m" }} livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost + exec: + command: + - /bin/calico-node + - -felix-live periodSeconds: 10 initialDelaySeconds: 10 failureThreshold: 6 @@ -17516,41 +12369,21 @@ spec: - mountPath: /lib/modules name: lib-modules readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false - mountPath: /var/run/calico name: var-run-calico readOnly: false - mountPath: /var/lib/calico name: var-lib-calico readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v3.2.3 - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir + - name: policysync + mountPath: /var/run/nodeagent # This container runs flannel using the kube-subnet-mgr backend # for allocating subnets. - name: kube-flannel - image: quay.io/coreos/flannel:v0.9.0 + image: quay.io/coreos/flannel:v0.11.0 command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] securityContext: privileged: true @@ -17573,13 +12406,18 @@ spec: configMapKeyRef: name: canal-config key: masquerade + {{- if eq .Networking.Canal.DisableFlannelForwardRules true }} + - name: FLANNELD_IPTABLES_FORWARD_RULES + value: "false" + {{- end }} volumeMounts: - - name: run - mountPath: /run + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false - name: flannel-cfg mountPath: /etc/kube-flannel/ volumes: - # Used by calico/node. + # Used by canal. - name: lib-modules hostPath: path: /lib/modules @@ -17589,10 +12427,11 @@ spec: - name: var-lib-calico hostPath: path: /var/lib/calico - # Used by flannel. - - name: run + - name: xtables-lock hostPath: - path: /run + path: /run/xtables.lock + type: FileOrCreate + # Used by flannel. 
- name: flannel-cfg configMap: name: canal-config @@ -17603,6 +12442,16 @@ spec: - name: cni-net-dir hostPath: path: /etc/cni/net.d + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host + hostPath: + type: DirectoryOrCreate + path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" --- apiVersion: v1 @@ -17610,163 +12459,110 @@ kind: ServiceAccount metadata: name: canal namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +`) ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico -rules: - - apiGroups: [""] - resources: - - namespaces - - serviceaccounts - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - patch - - apiGroups: [""] - resources: - - services - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - verbs: - - get - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - globalnetworksets - - hostendpoints - - bgpconfigurations - - ippools - - globalnetworkpolicies - - networkpolicies - - clusterinformations - verbs: - - create - - get - - list - - update - - watch - ---- +func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate, nil +} -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- +func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplateBytes() + if err != nil { + return nil, err + } -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system + info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} ---- +var _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate = []byte(`# Pulled and modified from: https://docs.projectcalico.org/v3.13/manifests/canal.yaml -# Bind the ClusterRole to the canal ServiceAccount. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Canal installation. 
+kind: ConfigMap +apiVersion: v1 metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal + name: canal-config namespace: kube-system + labels: + role.kubernetes.io/networking: "1" +data: + # Typha is disabled. + typha_service_name: "{{ if .Networking.Canal.TyphaReplicas }}calico-typha{{ else }}none{{ end }}" + # The interface used by canal for host <-> host communication. + # If left blank, then the interface is chosen using the node's + # default route. + canal_iface: "" ---- + # Whether or not to masquerade traffic to destinations not within + # the pod network. + masquerade: "true" -# Create all the CustomResourceDefinitions needed for -# Calico policy and networking mode. + # Configure the MTU to use + {{- if .Networking.Canal.MTU }} + veth_mtu: "{{ .Networking.Canal.MTU }}" + {{- else }} + veth_mtu: "{{- if eq .CloudProvider "openstack" -}}1430{{- else -}}1440{{- end -}}" + {{- end }} + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "host-local", + "subnet": "usePodCidr" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } + + # Flannel network configuration. Mounted into the flannel container. 
+ net-conf.json: | + { + "Network": "{{ .NonMasqueradeCIDR }}", + "Backend": { + "Type": "vxlan" + } + } -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration --- +# Source: calico/templates/kdd-crds.yaml apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: bgpconfigurations.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -17777,41 +12573,44 @@ spec: singular: bgpconfiguration --- - apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: ippools.crd.projectcalico.org + name: bgppeers.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: - kind: IPPool - plural: ippools - singular: ippool + kind: BGPPeer + plural: bgppeers + singular: bgppeer --- - apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: hostendpoints.crd.projectcalico.org + name: blockaffinities.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint + kind: BlockAffinity + plural: blockaffinities + singular: blockaffinity --- - apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: name: clusterinformations.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org @@ -17822,870 +12621,1077 @@ spec: singular: clusterinformation --- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: globalnetworksets.crd.projectcalico.org + name: felixconfigurations.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" spec: scope: Cluster group: crd.projectcalico.org version: v1 names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset + kind: FelixConfiguration + plural: felixconfigurations + singular: felixconfiguration --- - apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy -`) - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := 
&asset{bytes: bytes, info: info} - return a, nil -} - -var _cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate = []byte(`# Pulled and modified from: https://github.com/weaveworks/weave/releases/download/v2.7.0/weave-daemonset-k8s-1.11.yaml - -{{- if WeaveSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: weave-net - namespace: kube-system -stringData: - network-password: {{ WeaveSecret }} ---- -{{- end }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: weave-net - labels: - name: weave-net - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: weave-net - labels: - name: weave-net - role.kubernetes.io/networking: "1" - namespace: kube-system -rules: - - apiGroups: - - '' - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - 'networking.k8s.io' - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - '' - resources: - - nodes/status - verbs: - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: weave-net + name: globalnetworkpolicies.crd.projectcalico.org labels: - name: weave-net role.kubernetes.io/networking: "1" - namespace: kube-system -roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkPolicy + plural: globalnetworkpolicies + singular: globalnetworkpolicy + --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: weave-net - namespace: kube-system + name: globalnetworksets.crd.projectcalico.org labels: - name: weave-net -rules: - - apiGroups: - - '' - resources: - - configmaps - resourceNames: - - weave-net - verbs: - - get - - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: GlobalNetworkSet + plural: globalnetworksets + singular: globalnetworkset + --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: weave-net - namespace: kube-system + name: hostendpoints.crd.projectcalico.org labels: - name: weave-net -roleRef: - kind: Role - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: HostEndpoint + plural: hostendpoints + singular: hostendpoint + --- -apiVersion: apps/v1 -kind: DaemonSet +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: weave-net + name: ipamblocks.crd.projectcalico.org labels: - name: weave-net role.kubernetes.io/networking: "1" - namespace: kube-system spec: - # Wait 5 seconds to let pod connect before rolling next pod - selector: - matchLabels: - name: weave-net - role.kubernetes.io/networking: "1" - minReadySeconds: 5 - template: - metadata: - labels: - name: weave-net - role.kubernetes.io/networking: "1" - annotations: - prometheus.io/scrape: "true" - spec: - 
containers: - - name: weave - command: - - /home/weave/launch.sh - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: {{ .KubeControllerManager.ClusterCIDR }} - {{- if .Networking.Weave.MTU }} - - name: WEAVE_MTU - value: "{{ .Networking.Weave.MTU }}" - {{- end }} - {{- if .Networking.Weave.NoMasqLocal }} - - name: NO_MASQ_LOCAL - value: "{{ .Networking.Weave.NoMasqLocal }}" - {{- end }} - {{- if .Networking.Weave.ConnLimit }} - - name: CONN_LIMIT - value: "{{ .Networking.Weave.ConnLimit }}" - {{- end }} - {{- if .Networking.Weave.NetExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NetExtraArgs }}" - {{- end }} - {{- if WeaveSecret }} - - name: WEAVE_PASSWORD - valueFrom: - secretKeyRef: - name: weave-net - key: network-password - {{- end }} - image: 'weaveworks/weave-kube:2.7.0' - ports: - - name: metrics - containerPort: 6782 - readinessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - resources: - requests: - cpu: {{ or .Networking.Weave.CPURequest "50m" }} - memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.CPULimit }} - cpu: {{ .Networking.Weave.CPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - - name: weave-npc - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if .Networking.Weave.NPCExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NPCExtraArgs }}" - {{- end }} - image: 'weaveworks/weave-npc:2.7.0' - ports: - - name: metrics - containerPort: 6781 - resources: - requests: - cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }} - memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.NPCCPULimit }} - cpu: {{ .Networking.Weave.NPCCPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - priorityClassName: system-node-critical - updateStrategy: - type: RollingUpdate -`) - -func cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate, nil -} - -func cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate() (*asset, error) { - bytes, err := 
cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplateBytes() - if err != nil { - return nil, err - } + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMBlock + plural: ipamblocks + singular: ipamblock - info := bindataFileInfo{name: "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMConfig + plural: ipamconfigs + singular: ipamconfig -var _cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplate = []byte(`# Pulled and modified from: https://github.com/weaveworks/weave/releases/download/v2.7.0/weave-daemonset-k8s-1.9.yaml +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPAMHandle + plural: ipamhandles + singular: ipamhandle -{{- if WeaveSecret }} -apiVersion: v1 -kind: Secret +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: weave-net - namespace: kube-system -stringData: - network-password: {{ WeaveSecret }} + name: ippools.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Cluster + group: crd.projectcalico.org + version: v1 + names: + kind: IPPool + plural: ippools + singular: ippool + --- -{{- end }} +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org + labels: + role.kubernetes.io/networking: "1" +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkPolicy + plural: networkpolicies + singular: networkpolicy -apiVersion: v1 -kind: ServiceAccount +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition metadata: - name: weave-net + name: networksets.crd.projectcalico.org labels: - name: weave-net - namespace: kube-system + role.kubernetes.io/networking: "1" +spec: + scope: Namespaced + group: crd.projectcalico.org + version: v1 + names: + kind: NetworkSet + plural: networksets + singular: networkset + --- -apiVersion: rbac.authorization.k8s.io/v1 +# Source: calico/templates/rbac.yaml + +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: weave-net + name: calico labels: - name: weave-net role.kubernetes.io/networking: "1" - namespace: kube-system rules: - - apiGroups: - - '' + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] resources: - pods - - namespaces - nodes + - namespaces + verbs: + - get + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps verbs: - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. 
+ - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: - list - watch - - apiGroups: - - extensions + # The CNI plugin patches pods/status. + - apiGroups: [""] resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipamblocks + - globalnetworkpolicies + - globalnetworksets - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities verbs: - get - list - watch - - apiGroups: - - 'networking.k8s.io' + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] resources: - - networkpolicies + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes verbs: - get - list - watch - - apiGroups: - - '' + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + +--- +# Flannel ClusterRole +# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: flannel + labels: + role.kubernetes.io/networking: "1" +rules: + - apiGroups: [""] + resources: + - pods + verbs: + - get + - apiGroups: [""] + resources: + - nodes + verbs: + - list + - watch + - apiGroups: [""] resources: - nodes/status verbs: - patch - - update +--- +# Bind the flannel ClusterRole to the canal ServiceAccount. +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: canal-flannel + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: weave-net + name: canal-calico + labels: + role.kubernetes.io/networking: "1" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico +subjects: +- kind: ServiceAccount + name: canal + namespace: kube-system + +{{ if .Networking.Canal.TyphaReplicas -}} +--- +# Source: calico/templates/calico-typha.yaml +# This manifest creates a Service, which will be backed by Calico's Typha daemon. +# Typha sits in between Felix and the API server, reducing Calico's load on the API server. 
+ +apiVersion: v1 +kind: Service +metadata: + name: calico-typha + namespace: kube-system labels: - name: weave-net + k8s-app: calico-typha role.kubernetes.io/networking: "1" - namespace: kube-system -roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system +spec: + ports: + - port: 5473 + protocol: TCP + targetPort: calico-typha + name: calico-typha + selector: + k8s-app: calico-typha + --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role + +# This manifest creates a Deployment of Typha to back the above service. + +apiVersion: apps/v1 +kind: Deployment metadata: - name: weave-net + name: calico-typha namespace: kube-system labels: - name: weave-net -rules: - - apiGroups: - - '' - resources: - - configmaps - resourceNames: - - weave-net - verbs: - - get - - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the + # typha_service_name variable in the canal-config ConfigMap above. + # + # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential + # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In + # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. + replicas: {{ or .Networking.Canal.TyphaReplicas 0 }} + revisionHistoryLimit: 2 + selector: + matchLabels: + k8s-app: calico-typha + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-typha + role.kubernetes.io/networking: "1" + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + # Since Calico can't network a pod until Typha is up, we need to run Typha itself + # as a host-networked pod. + serviceAccountName: canal + priorityClassName: system-cluster-critical + # fsGroup allows using projected serviceaccount tokens as described here kubernetes/kubernetes#82573 + securityContext: + fsGroup: 65534 + containers: + - image: calico/typha:v3.13.4 + name: calico-typha + ports: + - containerPort: 5473 + name: calico-typha + protocol: TCP + env: + # Enable "info" logging by default. Can be set to "debug" to increase verbosity. + - name: TYPHA_LOGSEVERITYSCREEN + value: "info" + # Disable logging to file and syslog since those don't make sense in Kubernetes. + - name: TYPHA_LOGFILEPATH + value: "none" + - name: TYPHA_LOGSEVERITYSYS + value: "none" + # Monitor the Kubernetes API to find the number of running instances and rebalance + # connections. 
+ - name: TYPHA_CONNECTIONREBALANCINGMODE + value: "kubernetes" + - name: TYPHA_DATASTORETYPE + value: "kubernetes" + - name: TYPHA_HEALTHENABLED + value: "true" + - name: TYPHA_PROMETHEUSMETRICSENABLED + value: "{{- or .Networking.Canal.TyphaPrometheusMetricsEnabled "false" }}" + - name: TYPHA_PROMETHEUSMETRICSPORT + value: "{{- or .Networking.Canal.TyphaPrometheusMetricsPort "9093" }}" + livenessProbe: + httpGet: + path: /liveness + port: 9098 + host: localhost + periodSeconds: 30 + initialDelaySeconds: 30 + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readinessProbe: + httpGet: + path: /readiness + port: 9098 + host: localhost + periodSeconds: 10 + --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding + +# This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget metadata: - name: weave-net + name: calico-typha namespace: kube-system labels: - name: weave-net -roleRef: - kind: Role - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system + k8s-app: calico-typha + role.kubernetes.io/networking: "1" +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-typha +{{- end }} + --- -apiVersion: apps/v1 +# Source: calico/templates/calico-node.yaml +# This manifest installs the canal container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. kind: DaemonSet +apiVersion: apps/v1 metadata: - name: weave-net + name: canal + namespace: kube-system labels: - name: weave-net + k8s-app: canal role.kubernetes.io/networking: "1" - namespace: kube-system spec: - # Wait 5 seconds to let pod connect before rolling next pod selector: matchLabels: - name: weave-net - minReadySeconds: 5 + k8s-app: canal + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 template: metadata: labels: - name: weave-net + k8s-app: canal role.kubernetes.io/networking: "1" - annotations: - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure canal gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: canal + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: calico/cni:v3.13.4 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-canal.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: canal-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: canal-config + key: veth_mtu + # Prevents the container from sleeping forever. 
+ - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes + # to communicate with Felix over the Policy Sync API. + - name: flexvol-driver + image: calico/pod2daemon-flexvol:v3.13.4 + volumeMounts: + - name: flexvol-driver-host + mountPath: /host/driver + securityContext: + privileged: true containers: - - name: weave - command: - - /home/weave/launch.sh + # Runs canal container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: calico/node:v3.13.4 env: - - name: HOSTNAME + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Configure route aggregation based on pod CIDR. + - name: USE_POD_CIDR + value: "true" + {{- if .Networking.Canal.TyphaReplicas }} + # Typha support: controlled by the ConfigMap. + - name: FELIX_TYPHAK8SSERVICENAME + valueFrom: + configMapKeyRef: + name: canal-config + key: typha_service_name + {{- end }} + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME valueFrom: fieldRef: - apiVersion: v1 fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: {{ .KubeControllerManager.ClusterCIDR }} - {{- if .Networking.Weave.MTU }} - - name: WEAVE_MTU - value: "{{ .Networking.Weave.MTU }}" - {{- end }} - {{- if .Networking.Weave.NoMasqLocal }} - - name: NO_MASQ_LOCAL - value: "{{ .Networking.Weave.NoMasqLocal }}" - {{- end }} - {{- if .Networking.Weave.ConnLimit }} - - name: CONN_LIMIT - value: "{{ .Networking.Weave.ConnLimit }}" - {{- end }} - {{- if .Networking.Weave.NetExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NetExtraArgs }}" - {{- end }} - {{- if WeaveSecret }} - - name: WEAVE_PASSWORD + # Don't enable BGP. + - name: CALICO_NETWORKING_BACKEND + value: "none" + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,canal" + # Period, in seconds, at which felix re-applies all iptables state + - name: FELIX_IPTABLESREFRESHINTERVAL + value: "60" + # No IP address needed. + - name: IP + value: "" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU valueFrom: - secretKeyRef: - name: weave-net - key: network-password - {{- end }} - image: 'weaveworks/weave-kube:2.7.0' - ports: - - name: metrics - containerPort: 6782 - readinessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - resources: - requests: - cpu: {{ or .Networking.Weave.CPURequest "50m" }} - memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.CPULimit }} - cpu: {{ .Networking.Weave.CPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} + configMapKeyRef: + name: canal-config + key: veth_mtu + # Disable file logging so ` + "`" + `kubectl logs` + "`" + ` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" + # Disable IPv6 on Kubernetes. 
+ - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "{{- or .Networking.Canal.LogSeveritySys "info" }}" + - name: FELIX_HEALTHENABLED + value: "true" + + # kops additions + # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom + - name: FELIX_CHAININSERTMODE + value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" + # Set Felix iptables binary variant, Legacy or NFT + - name: FELIX_IPTABLESBACKEND + value: "{{- or .Networking.Canal.IptablesBackend "Auto" }}" + # Set to enable the experimental Prometheus metrics server + - name: FELIX_PROMETHEUSMETRICSENABLED + value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" + # TCP port that the Prometheus metrics server should bind to + - name: FELIX_PROMETHEUSMETRICSPORT + value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" + # Enable Prometheus Go runtime metrics collection + - name: FELIX_PROMETHEUSGOMETRICSENABLED + value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" + # Enable Prometheus process metrics collection + - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED + value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" securityContext: privileged: true + resources: + requests: + cpu: {{ or .Networking.Canal.CPURequest "100m" }} + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + host: localhost + periodSeconds: 10 volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: xtables-lock - mountPath: /run/xtables.lock + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock readOnly: false - - name: weave-npc + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # This container runs flannel using the kube-subnet-mgr backend + # for allocating subnets. 
+ - name: kube-flannel + image: quay.io/coreos/flannel:v0.11.0 + command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] + securityContext: + privileged: true env: - - name: HOSTNAME + - name: POD_NAME valueFrom: fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if .Networking.Weave.NPCExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NPCExtraArgs }}" + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: FLANNELD_IFACE + valueFrom: + configMapKeyRef: + name: canal-config + key: canal_iface + - name: FLANNELD_IP_MASQ + valueFrom: + configMapKeyRef: + name: canal-config + key: masquerade + {{- if eq .Networking.Canal.DisableFlannelForwardRules true }} + - name: FLANNELD_IPTABLES_FORWARD_RULES + value: "false" {{- end }} - image: 'weaveworks/weave-npc:2.7.0' - ports: - - name: metrics - containerPort: 6781 - resources: - requests: - cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }} - memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.NPCCPULimit }} - cpu: {{ .Networking.Weave.NPCCPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }} - securityContext: - privileged: true volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - name: flannel-cfg + mountPath: /etc/kube-flannel/ volumes: - - name: weavedb + # Used by canal. + - name: lib-modules hostPath: - path: /var/lib/weave - - name: cni-bin + path: /lib/modules + - name: var-run-calico hostPath: - path: /opt - - name: cni-bin2 + path: /var/run/calico + - name: var-lib-calico hostPath: - path: /home - - name: cni-conf + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + # Used by flannel. + - name: flannel-cfg + configMap: + name: canal-config + # Used to install CNI. 
+ - name: cni-bin-dir hostPath: - path: /etc - - name: dbus + path: /opt/cni/bin + - name: cni-net-dir hostPath: - path: /var/lib/dbus - - name: lib-modules + path: /etc/cni/net.d + # Used to create per-pod Unix Domain Sockets + - name: policysync hostPath: - path: /lib/modules - - name: xtables-lock + type: DirectoryOrCreate + path: /var/run/nodeagent + # Used to install Flex Volume Driver + - name: flexvol-driver-host hostPath: - path: /run/xtables.lock - type: FileOrCreate - updateStrategy: - type: RollingUpdate + type: DirectoryOrCreate + path: "{{- or .Kubelet.VolumePluginDirectory "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/" }}nodeagent~uds" +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: canal + namespace: kube-system + labels: + role.kubernetes.io/networking: "1" `) -func cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplate, nil +func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate, nil } -func cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplateBytes() +func cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplateBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate = []byte(`{{- $proxy := .EgressProxy }} -{{- $na := .NodeAuthorization.NodeAuthorizer }} -{{- $name := "node-authorizer" }} -{{- $namespace := "kube-system" }} +var _cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate = []byte(`# Pulled and modified from: https://github.com/weaveworks/weave/releases/download/v2.7.0/weave-daemonset-k8s-1.11.yaml + +{{- if WeaveSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: weave-net + namespace: kube-system +stringData: + network-password: {{ WeaveSecret }} --- +{{- end }} + apiVersion: v1 kind: ServiceAccount metadata: - name: {{ $name }} - namespace: {{ $namespace }} + name: weave-net labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io + name: weave-net + namespace: kube-system --- +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: kops:{{ $name }}:nodes-viewer + name: weave-net labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io + name: weave-net + role.kubernetes.io/networking: "1" + namespace: kube-system rules: -- apiGroups: - - "*" - resources: - - nodes - verbs: - - get - - list + - apiGroups: + - '' + resources: + - pods + - namespaces + - nodes + verbs: + - get + - list + - watch + - apiGroups: + - extensions + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - 'networking.k8s.io' + resources: + - networkpolicies + verbs: + - get + - list + - watch + - apiGroups: + - '' + resources: + - nodes/status + verbs: + - patch + - 
update --- -# permits the node access to create a CSR -kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kops:{{ $name }}:system:bootstrappers - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -roleRef: - kind: ClusterRole - name: system:node-bootstrapper - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: Group - name: system:bootstrappers - apiGroup: rbac.authorization.k8s.io ---- -# indicates to the controller to auto-sign the CSR for this group kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: kops:{{ $name }}:approval + name: weave-net labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io + name: weave-net + role.kubernetes.io/networking: "1" + namespace: kube-system roleRef: kind: ClusterRole - name: system:certificates.k8s.io:certificatesigningrequests:nodeclient + name: weave-net apiGroup: rbac.authorization.k8s.io subjects: -- kind: Group - name: system:bootstrappers - apiGroup: rbac.authorization.k8s.io + - kind: ServiceAccount + name: weave-net + namespace: kube-system --- -# the service permission requires to create the bootstrap tokens -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: kops:{{ $namespace }}:{{ $name }} - namespace: {{ $namespace }} + name: weave-net + namespace: kube-system labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io + name: weave-net rules: -- apiGroups: - - "*" - resources: - - secrets - verbs: - - create - - list + - apiGroups: + - '' + resources: + - configmaps + resourceNames: + - weave-net + verbs: + - get + - update + - apiGroups: + - '' + resources: + - configmaps + verbs: + - create --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: - name: kops:{{ $namespace }}:{{ $name }} - namespace: {{ $namespace }} + name: weave-net + namespace: kube-system labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io + name: weave-net roleRef: - apiGroup: rbac.authorization.k8s.io kind: Role - name: kops:{{ $namespace }}:{{ $name }} -subjects: -- kind: ServiceAccount - name: {{ $name }} - namespace: {{ $namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: kops:{{ $name }}:nodes-viewer - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -roleRef: + name: weave-net apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kops:{{ $name }}:nodes-viewer subjects: -- kind: ServiceAccount - name: {{ $name }} - namespace: {{ $namespace }} + - kind: ServiceAccount + name: weave-net + namespace: kube-system --- +apiVersion: apps/v1 kind: DaemonSet -apiVersion: extensions/v1beta1 metadata: - name: {{ $name }} - namespace: {{ $namespace }} + name: weave-net labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io + name: weave-net + role.kubernetes.io/networking: "1" + namespace: kube-system spec: + # Wait 5 seconds to let pod connect before rolling next pod selector: - matchLabels: - k8s-app: {{ $name }} - template: - metadata: - labels: - k8s-app: {{ $name }} - annotations: - dns.alpha.kubernetes.io/internal: {{ $name }}-internal.{{ ClusterName }} - prometheus.io/port: "{{ $na.Port }}" - prometheus.io/scheme: "https" - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - nodeSelector: - kubernetes.io/role: master - serviceAccount: {{ $name }} 
- securityContext: - fsGroup: 1000 - tolerations: - - key: "node-role.kubernetes.io/master" - effect: NoSchedule - volumes: - - name: config - hostPath: - path: /srv/kubernetes/node-authorizer - type: DirectoryOrCreate - containers: - - name: {{ $name }} - image: {{ $na.Image }} - args: - - server - - --authorization-timeout={{ $na.Timeout.Duration }} - - --authorizer={{ $na.Authorizer }} - - --cluster-name={{ ClusterName }} - {{- range $na.Features }} - - --feature={{ . }} - {{- end }} - - --listen=0.0.0.0:{{ $na.Port }} - - --tls-cert=/config/tls.pem - - --tls-client-ca=/config/ca.pem - - --tls-private-key=/config/tls-key.pem - - --token-ttl={{ $na.TokenTTL.Duration }} - {{- if $proxy }} + matchLabels: + name: weave-net + role.kubernetes.io/networking: "1" + minReadySeconds: 5 + template: + metadata: + labels: + name: weave-net + role.kubernetes.io/networking: "1" + annotations: + prometheus.io/scrape: "true" + spec: + containers: + - name: weave + command: + - /home/weave/launch.sh env: - - name: http_proxy - value: {{ $proxy.HTTPProxy.Host }}:{{ $proxy.HTTPProxy.Port }} - {{- if $proxy.ProxyExcludes }} - - name: no_proxy - value: {{ $proxy.ProxyExcludes }} + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: IPALLOC_RANGE + value: {{ .KubeControllerManager.ClusterCIDR }} + {{- if .Networking.Weave.MTU }} + - name: WEAVE_MTU + value: "{{ .Networking.Weave.MTU }}" {{- end }} - {{- end }} + {{- if .Networking.Weave.NoMasqLocal }} + - name: NO_MASQ_LOCAL + value: "{{ .Networking.Weave.NoMasqLocal }}" + {{- end }} + {{- if .Networking.Weave.ConnLimit }} + - name: CONN_LIMIT + value: "{{ .Networking.Weave.ConnLimit }}" + {{- end }} + {{- if .Networking.Weave.NetExtraArgs }} + - name: EXTRA_ARGS + value: "{{ .Networking.Weave.NetExtraArgs }}" + {{- end }} + {{- if WeaveSecret }} + - name: WEAVE_PASSWORD + valueFrom: + secretKeyRef: + name: weave-net + key: network-password + {{- end }} + image: 'weaveworks/weave-kube:2.7.0' + ports: + - name: metrics + containerPort: 6782 + readinessProbe: + httpGet: + host: 127.0.0.1 + path: /status + port: 6784 resources: + requests: + cpu: {{ or .Networking.Weave.CPURequest "50m" }} + memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} limits: - cpu: 100m - memory: 64Mi + {{- if .Networking.Weave.CPULimit }} + cpu: {{ .Networking.Weave.CPULimit }} + {{- end }} + memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} + securityContext: + privileged: true + volumeMounts: + - name: weavedb + mountPath: /weavedb + - name: cni-bin + mountPath: /host/opt + - name: cni-bin2 + mountPath: /host/home + - name: cni-conf + mountPath: /host/etc + - name: dbus + mountPath: /host/var/lib/dbus + - name: lib-modules + mountPath: /lib/modules + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + - name: weave-npc + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + {{- if .Networking.Weave.NPCExtraArgs }} + - name: EXTRA_ARGS + value: "{{ .Networking.Weave.NPCExtraArgs }}" + {{- end }} + image: 'weaveworks/weave-npc:2.7.0' + ports: + - name: metrics + containerPort: 6781 + resources: requests: - cpu: 10m - memory: 10Mi + cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }} + memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }} + limits: + {{- if .Networking.Weave.NPCCPULimit }} + cpu: {{ .Networking.Weave.NPCCPULimit }} + {{- end }} + memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }} + securityContext: + privileged: true volumeMounts: - - 
mountPath: /config - readOnly: true - name: config + - name: xtables-lock + mountPath: /run/xtables.lock + readOnly: false + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + hostPID: true + restartPolicy: Always + securityContext: + seLinuxOptions: {} + serviceAccountName: weave-net + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + - key: CriticalAddonsOnly + operator: Exists + volumes: + - name: weavedb + hostPath: + path: /var/lib/weave + - name: cni-bin + hostPath: + path: /opt + - name: cni-bin2 + hostPath: + path: /home + - name: cni-conf + hostPath: + path: /etc + - name: dbus + hostPath: + path: /var/lib/dbus + - name: lib-modules + hostPath: + path: /lib/modules + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + priorityClassName: system-node-critical + updateStrategy: + type: RollingUpdate `) -func cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate, nil +func cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplateBytes() ([]byte, error) { + return _cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate, nil } -func cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplateBytes() +func cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate() (*asset, error) { + bytes, err := cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplateBytes() if err != nil { return nil, err } - info := bindataFileInfo{name: "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + info := bindataFileInfo{name: "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} a := &asset{bytes: bytes, info: info} return a, nil } @@ -19539,105 +14545,6 @@ func cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate() (*asset, err return a, nil } -var _cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplate = []byte(`--- -apiVersion: extensions/v1beta1 -kind: PodSecurityPolicy -metadata: - annotations: - k8s-addon: podsecuritypolicy.addons.k8s.io - name: kube-system -spec: - allowedCapabilities: - - '*' - fsGroup: - rule: RunAsAny - hostPID: true - hostIPC: true - hostNetwork: true - hostPorts: - - min: 1 - max: 65536 - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - annotations: - k8s-addon: podsecuritypolicy.addons.k8s.io - name: kops:kube-system:psp -rules: -- apiGroups: - - policy - resources: - - podsecuritypolicies - resourceNames: - - kube-system - verbs: - - use ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kops:kube-system:psp -roleRef: - kind: ClusterRole - name: kops:kube-system:psp - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: Group - name: system:masters - apiGroup: rbac.authorization.k8s.io -# permit the kubelets to access this policy (used for manifests) -- kind: User - name: kubelet - apiGroup: rbac.authorization.k8s.io -{{- if UseBootstrapTokens }} -- kind: Group - name: system:nodes - apiGroup: rbac.authorization.k8s.io -{{- end }} ---- -kind: RoleBinding -apiVersion: 
rbac.authorization.k8s.io/v1beta1 -metadata: - annotations: - k8s-addon: podsecuritypolicy.addons.k8s.io - name: kops:kube-system:psp - namespace: kube-system -roleRef: - kind: ClusterRole - name: kops:kube-system:psp - apiGroup: rbac.authorization.k8s.io -subjects: -# permit the cluster wise admin to use this policy -- kind: Group - name: system:serviceaccounts:kube-system - apiGroup: rbac.authorization.k8s.io -`) - -func cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplateBytes() ([]byte, error) { - return _cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplate, nil -} - -func cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplate() (*asset, error) { - bytes, err := cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplateBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} - return a, nil -} - var _cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate = []byte(`--- apiVersion: policy/v1beta1 kind: PodSecurityPolicy @@ -21045,60 +15952,42 @@ func AssetNames() []string { var _bindata = map[string]func() (*asset, error){ "cloudup/resources/addons/OWNERS": cloudupResourcesAddonsOwners, "cloudup/resources/addons/anonymous-issuer-discovery.addons.k8s.io/k8s-1.16.yaml.template": cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate, - "cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template": cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplate, "cloudup/resources/addons/authentication.aws/k8s-1.12.yaml.template": cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate, "cloudup/resources/addons/authentication.kope.io/k8s-1.12.yaml": cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml, - "cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml": cloudupResourcesAddonsAuthenticationKopeIoK8s18Yaml, "cloudup/resources/addons/cluster-autoscaler.addons.k8s.io/k8s-1.15.yaml.template": cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate, "cloudup/resources/addons/core.addons.k8s.io/addon.yaml": cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml, "cloudup/resources/addons/core.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate, - "cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template": cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplate, "cloudup/resources/addons/core.addons.k8s.io/v1.4.0.yaml": cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml, "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate, - "cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template": cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplate, "cloudup/resources/addons/digitalocean-cloud-controller.addons.k8s.io/k8s-1.8.yaml.template": cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate, "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate, - "cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template": cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplate, "cloudup/resources/addons/external-dns.addons.k8s.io/README.md": cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd, 
"cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate, - "cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template": cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplate, "cloudup/resources/addons/kops-controller.addons.k8s.io/k8s-1.16.yaml.template": cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate, "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate, - "cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template": cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplate, "cloudup/resources/addons/kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml": cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml, "cloudup/resources/addons/limit-range.addons.k8s.io/addon.yaml": cloudupResourcesAddonsLimitRangeAddonsK8sIoAddonYaml, "cloudup/resources/addons/limit-range.addons.k8s.io/v1.5.0.yaml": cloudupResourcesAddonsLimitRangeAddonsK8sIoV150Yaml, "cloudup/resources/addons/metadata-proxy.addons.k8s.io/addon.yaml": cloudupResourcesAddonsMetadataProxyAddonsK8sIoAddonYaml, "cloudup/resources/addons/metadata-proxy.addons.k8s.io/v0.1.12.yaml": cloudupResourcesAddonsMetadataProxyAddonsK8sIoV0112Yaml, "cloudup/resources/addons/metrics-server.addons.k8s.io/k8s-1.11.yaml.template": cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate, - "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template": cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplate, "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate, "cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.16.yaml.template": cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate, "cloudup/resources/addons/networking.cilium.io/k8s-1.12-v1.8.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate, "cloudup/resources/addons/networking.cilium.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate, - "cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template": cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplate, "cloudup/resources/addons/networking.flannel/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, - "cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template": cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplate, "cloudup/resources/addons/networking.kope.io/k8s-1.12.yaml": cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, - "cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml": cloudupResourcesAddonsNetworkingKopeIoK8s16Yaml, "cloudup/resources/addons/networking.kuberouter/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate, - "cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template": cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplate, "cloudup/resources/addons/networking.projectcalico.org/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate, "cloudup/resources/addons/networking.projectcalico.org/k8s-1.16.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, - "cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplate, - 
"cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplate, "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate, "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.15.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate, "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.16.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate, - "cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template": cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplate, "cloudup/resources/addons/networking.weave/k8s-1.12.yaml.template": cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate, - "cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template": cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplate, - "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template": cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate, "cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate, "cloudup/resources/addons/node-termination-handler.aws/k8s-1.11.yaml.template": cloudupResourcesAddonsNodeTerminationHandlerAwsK8s111YamlTemplate, "cloudup/resources/addons/nodelocaldns.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsNodelocaldnsAddonsK8sIoK8s112YamlTemplate, "cloudup/resources/addons/openstack.addons.k8s.io/k8s-1.13.yaml.template": cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate, - "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template": cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplate, "cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.12.yaml.template": cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate, "cloudup/resources/addons/rbac.addons.k8s.io/k8s-1.8.yaml": cloudupResourcesAddonsRbacAddonsK8sIoK8s18Yaml, "cloudup/resources/addons/scheduler.addons.k8s.io/v1.7.0.yaml": cloudupResourcesAddonsSchedulerAddonsK8sIoV170Yaml, @@ -21159,12 +16048,10 @@ var _bintree = &bintree{nil, map[string]*bintree{ "k8s-1.16.yaml.template": {cloudupResourcesAddonsAnonymousIssuerDiscoveryAddonsK8sIoK8s116YamlTemplate, map[string]*bintree{}}, }}, "authentication.aws": {nil, map[string]*bintree{ - "k8s-1.10.yaml.template": {cloudupResourcesAddonsAuthenticationAwsK8s110YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsAuthenticationAwsK8s112YamlTemplate, map[string]*bintree{}}, }}, "authentication.kope.io": {nil, map[string]*bintree{ "k8s-1.12.yaml": {cloudupResourcesAddonsAuthenticationKopeIoK8s112Yaml, map[string]*bintree{}}, - "k8s-1.8.yaml": {cloudupResourcesAddonsAuthenticationKopeIoK8s18Yaml, map[string]*bintree{}}, }}, "cluster-autoscaler.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.15.yaml.template": {cloudupResourcesAddonsClusterAutoscalerAddonsK8sIoK8s115YamlTemplate, map[string]*bintree{}}, @@ -21172,31 +16059,26 @@ var _bintree = &bintree{nil, map[string]*bintree{ "core.addons.k8s.io": {nil, map[string]*bintree{ "addon.yaml": {cloudupResourcesAddonsCoreAddonsK8sIoAddonYaml, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsCoreAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.7.yaml.template": 
{cloudupResourcesAddonsCoreAddonsK8sIoK8s17YamlTemplate, map[string]*bintree{}}, "v1.4.0.yaml": {cloudupResourcesAddonsCoreAddonsK8sIoV140Yaml, map[string]*bintree{}}, }}, "coredns.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsCorednsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.6.yaml.template": {cloudupResourcesAddonsCorednsAddonsK8sIoK8s16YamlTemplate, map[string]*bintree{}}, }}, "digitalocean-cloud-controller.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.8.yaml.template": {cloudupResourcesAddonsDigitaloceanCloudControllerAddonsK8sIoK8s18YamlTemplate, map[string]*bintree{}}, }}, "dns-controller.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.6.yaml.template": {cloudupResourcesAddonsDnsControllerAddonsK8sIoK8s16YamlTemplate, map[string]*bintree{}}, }}, "external-dns.addons.k8s.io": {nil, map[string]*bintree{ "README.md": {cloudupResourcesAddonsExternalDnsAddonsK8sIoReadmeMd, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.6.yaml.template": {cloudupResourcesAddonsExternalDnsAddonsK8sIoK8s16YamlTemplate, map[string]*bintree{}}, }}, "kops-controller.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.16.yaml.template": {cloudupResourcesAddonsKopsControllerAddonsK8sIoK8s116YamlTemplate, map[string]*bintree{}}, }}, "kube-dns.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.6.yaml.template": {cloudupResourcesAddonsKubeDnsAddonsK8sIoK8s16YamlTemplate, map[string]*bintree{}}, }}, "kubelet-api.rbac.addons.k8s.io": {nil, map[string]*bintree{ "k8s-1.9.yaml": {cloudupResourcesAddonsKubeletApiRbacAddonsK8sIoK8s19Yaml, map[string]*bintree{}}, @@ -21213,45 +16095,35 @@ var _bintree = &bintree{nil, map[string]*bintree{ "k8s-1.11.yaml.template": {cloudupResourcesAddonsMetricsServerAddonsK8sIoK8s111YamlTemplate, map[string]*bintree{}}, }}, "networking.amazon-vpc-routed-eni": {nil, map[string]*bintree{ - "k8s-1.10.yaml.template": {cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s110YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s112YamlTemplate, map[string]*bintree{}}, "k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingAmazonVpcRoutedEniK8s116YamlTemplate, map[string]*bintree{}}, }}, "networking.cilium.io": {nil, map[string]*bintree{ "k8s-1.12-v1.8.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112V18YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.7.yaml.template": {cloudupResourcesAddonsNetworkingCiliumIoK8s17YamlTemplate, map[string]*bintree{}}, }}, "networking.flannel": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingFlannelK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.6.yaml.template": {cloudupResourcesAddonsNetworkingFlannelK8s16YamlTemplate, map[string]*bintree{}}, }}, "networking.kope.io": {nil, map[string]*bintree{ "k8s-1.12.yaml": {cloudupResourcesAddonsNetworkingKopeIoK8s112Yaml, map[string]*bintree{}}, - "k8s-1.6.yaml": {cloudupResourcesAddonsNetworkingKopeIoK8s16Yaml, map[string]*bintree{}}, }}, "networking.kuberouter": {nil, map[string]*bintree{ 
"k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingKuberouterK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.6.yaml.template": {cloudupResourcesAddonsNetworkingKuberouterK8s16YamlTemplate, map[string]*bintree{}}, }}, "networking.projectcalico.org": {nil, map[string]*bintree{ - "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, map[string]*bintree{}}, - "k8s-1.7-v3.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17V3YamlTemplate, map[string]*bintree{}}, - "k8s-1.7.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s17YamlTemplate, map[string]*bintree{}}, + "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s112YamlTemplate, map[string]*bintree{}}, + "k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgK8s116YamlTemplate, map[string]*bintree{}}, }}, "networking.projectcalico.org.canal": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s112YamlTemplate, map[string]*bintree{}}, "k8s-1.15.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s115YamlTemplate, map[string]*bintree{}}, "k8s-1.16.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s116YamlTemplate, map[string]*bintree{}}, - "k8s-1.9.yaml.template": {cloudupResourcesAddonsNetworkingProjectcalicoOrgCanalK8s19YamlTemplate, map[string]*bintree{}}, }}, "networking.weave": {nil, map[string]*bintree{ "k8s-1.12.yaml.template": {cloudupResourcesAddonsNetworkingWeaveK8s112YamlTemplate, map[string]*bintree{}}, - "k8s-1.9.yaml.template": {cloudupResourcesAddonsNetworkingWeaveK8s19YamlTemplate, map[string]*bintree{}}, }}, "node-authorizer.addons.k8s.io": {nil, map[string]*bintree{ - "k8s-1.10.yaml.template": {cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s110YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsNodeAuthorizerAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, }}, "node-termination-handler.aws": {nil, map[string]*bintree{ @@ -21264,7 +16136,6 @@ var _bintree = &bintree{nil, map[string]*bintree{ "k8s-1.13.yaml.template": {cloudupResourcesAddonsOpenstackAddonsK8sIoK8s113YamlTemplate, map[string]*bintree{}}, }}, "podsecuritypolicy.addons.k8s.io": {nil, map[string]*bintree{ - "k8s-1.10.yaml.template": {cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s110YamlTemplate, map[string]*bintree{}}, "k8s-1.12.yaml.template": {cloudupResourcesAddonsPodsecuritypolicyAddonsK8sIoK8s112YamlTemplate, map[string]*bintree{}}, }}, "rbac.addons.k8s.io": {nil, map[string]*bintree{ diff --git a/upup/models/cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template b/upup/models/cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template deleted file mode 100644 index d7e6990a5b200..0000000000000 --- a/upup/models/cloudup/resources/addons/authentication.aws/k8s-1.10.yaml.template +++ /dev/null @@ -1,69 +0,0 @@ ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - namespace: kube-system - name: aws-iam-authenticator - labels: - k8s-app: aws-iam-authenticator -spec: - updateStrategy: - type: RollingUpdate - template: - metadata: - annotations: - scheduler.alpha.kubernetes.io/critical-pod: "" - labels: - k8s-app: aws-iam-authenticator - spec: - # run on the host network (don't depend on CNI) - hostNetwork: true - - # run on each 
master node - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - key: CriticalAddonsOnly - operator: Exists - - # run `aws-iam-authenticator server` with three volumes - # - config (mounted from the ConfigMap at /etc/aws-iam-authenticator/config.yaml) - # - state (persisted TLS certificate and keys, mounted from the host) - # - output (output kubeconfig to plug into your apiserver configuration, mounted from the host) - containers: - - name: aws-iam-authenticator - image: {{ or .Authentication.Aws.Image "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon/aws-iam-authenticator:v0.4.0" }} - args: - - server - - --config=/etc/aws-iam-authenticator/config.yaml - - --state-dir=/var/aws-iam-authenticator - - --kubeconfig-pregenerated=true - - resources: - requests: - memory: {{ or .Authentication.Aws.MemoryRequest "20Mi" }} - cpu: {{ or .Authentication.Aws.CPURequest "10m" }} - limits: - memory: {{ or .Authentication.Aws.MemoryLimit "20Mi" }} - cpu: {{ or .Authentication.Aws.CPULimit "100m" }} - - volumeMounts: - - name: config - mountPath: /etc/aws-iam-authenticator/ - - name: state - mountPath: /var/aws-iam-authenticator/ - - name: output - mountPath: /etc/kubernetes/aws-iam-authenticator/ - - volumes: - - name: config - configMap: - name: aws-iam-authenticator - - name: output - hostPath: - path: /srv/kubernetes/aws-iam-authenticator/ - - name: state - hostPath: - path: /srv/kubernetes/aws-iam-authenticator/ diff --git a/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml b/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml deleted file mode 100644 index 62f4cdfcae450..0000000000000 --- a/upup/models/cloudup/resources/addons/authentication.kope.io/k8s-1.8.yaml +++ /dev/null @@ -1,185 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" - ---- - -apiVersion: v1 -kind: Service -metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - selector: - app: auth-api - ports: - - port: 443 - targetPort: 9002 - ---- - -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - template: - metadata: - labels: - app: auth-api - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - serviceAccountName: auth-api - hostNetwork: true - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - key: "CriticalAddonsOnly" - operator: "Exists" - containers: - - name: auth-api - image: kopeio/auth-api:1.0.20171125 - imagePullPolicy: Always - ports: - - containerPort: 9001 - command: - - /auth-api - - --listen=127.0.0.1:9001 - - --secure-port=9002 - - --etcd-servers=http://127.0.0.1:4001 - - --v=8 - - --storage-backend=etcd2 - ---- - -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1alpha1.auth.kope.io - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - insecureSkipTLSVerify: true - group: auth.kope.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: auth-api - namespace: kopeio-auth - version: v1alpha1 - ---- - -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService 
-metadata: - name: v1alpha1.config.auth.kope.io - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -spec: - insecureSkipTLSVerify: true - group: config.auth.kope.io - groupPriorityMinimum: 1000 - versionPriority: 15 - service: - name: auth-api - namespace: kopeio-auth - version: v1alpha1 - ---- - -kind: ServiceAccount -apiVersion: v1 -metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: kopeio-auth:auth-api:auth-reader - namespace: kube-system - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: auth-api - namespace: kopeio-auth - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: kopeio-auth:system:auth-delegator - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: auth-api - namespace: kopeio-auth - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -rules: -- apiGroups: ["auth.kope.io"] - resources: ["users"] - verbs: ["get", "list", "watch"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: auth-api - namespace: kopeio-auth - labels: - k8s-addon: authentication.kope.io - role.kubernetes.io/authentication: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: auth-api -subjects: -- kind: ServiceAccount - name: auth-api - namespace: kopeio-auth diff --git a/upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template b/upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template deleted file mode 100644 index 841e40330c85f..0000000000000 --- a/upup/models/cloudup/resources/addons/core.addons.k8s.io/k8s-1.7.yaml.template +++ /dev/null @@ -1,153 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - name: system:cloud-controller-manager -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - nodes - verbs: - - '*' -- apiGroups: - - "" - resources: - - services - verbs: - - list - - patch - - update - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update -- apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create -- apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - get - - list - - update - - watch -- apiGroups: - - "" - resources: - - secrets - verbs: - - list - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cloud-controller-manager - namespace: kube-system - ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: system:cloud-controller-manager -roleRef: - apiGroup: 
rbac.authorization.k8s.io - kind: ClusterRole - name: system:cloud-controller-manager -subjects: -- kind: ServiceAccount - name: cloud-controller-manager - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - labels: - k8s-app: cloud-controller-manager - name: cloud-controller-manager - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: cloud-controller-manager - template: - metadata: - labels: - k8s-app: cloud-controller-manager - spec: - nodeSelector: - node-role.kubernetes.io/master: "" - serviceAccountName: cloud-controller-manager - containers: - - name: cloud-controller-manager - # for in-tree providers we use k8s.gcr.io/cloud-controller-manager - # this can be replaced with any other image for out-of-tree providers - image: k8s.gcr.io/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work? - command: - - /usr/local/bin/cloud-controller-manager - - --cloud-provider={{ .CloudProvider }} - - --leader-elect=true - - --use-service-account-credentials - # these flags will vary for every cloud provider - - --allocate-node-cidrs=true - - --configure-cloud-routes=true - - --cluster-cidr={{ .KubeControllerManager.ClusterCIDR }} - volumeMounts: - - name: ca-certificates - mountPath: /etc/ssl/certs - hostNetwork: true - dnsPolicy: Default - volumes: - - name: ca-certificates - hostPath: - path: /etc/ssl/certs - tolerations: - # this is required so CCM can bootstrap itself - - key: node.cloudprovider.kubernetes.io/uninitialized - value: "true" - effect: NoSchedule - # this is to have the daemonset runnable on master nodes - # the taint may vary depending on your cluster setup - - key: node-role.kubernetes.io/master - effect: NoSchedule - # this is to restrict CCM to only run on master nodes - # the node selector may vary depending on your cluster setup - - key: "CriticalAddonsOnly" - operator: "Exists" - diff --git a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template deleted file mode 100644 index 60d470e51bfbb..0000000000000 --- a/upup/models/cloudup/resources/addons/coredns.addons.k8s.io/k8s-1.6.yaml.template +++ /dev/null @@ -1,197 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - k8s-addon: coredns.addons.k8s.io ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - kubernetes.io/bootstrapping: rbac-defaults - k8s-addon: coredns.addons.k8s.io - name: system:coredns -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - k8s-addon: coredns.addons.k8s.io - name: system:coredns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:coredns -subjects: -- kind: ServiceAccount - name: coredns - namespace: kube-system ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: EnsureExists -data: - Corefile: | - {{- if KubeDNS.ExternalCoreFile }} -{{ KubeDNS.ExternalCoreFile | indent 4 }} - {{- else }} - .:53 { - errors - health { - lameduck 5s 
- } - kubernetes {{ KubeDNS.Domain }}. in-addr.arpa ip6.arpa { - pods insecure - fallthrough in-addr.arpa ip6.arpa - } - prometheus :9153 - forward . /etc/resolv.conf { - max_concurrent 1000 - } - loop - cache 30 - loadbalance - reload - } - {{- end }} ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns - namespace: kube-system - labels: - k8s-app: kube-dns - k8s-addon: coredns.addons.k8s.io - kubernetes.io/cluster-service: "true" -spec: - replicas: 2 - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - spec: - priorityClassName: system-cluster-critical - serviceAccountName: coredns - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - name: coredns - image: {{ if KubeDNS.CoreDNSImage }}{{ KubeDNS.CoreDNSImage }}{{ else }}k8s.gcr.io/coredns:1.7.0{{ end }} - imagePullPolicy: IfNotPresent - resources: - limits: - memory: {{ KubeDNS.MemoryLimit }} - requests: - cpu: {{ KubeDNS.CPURequest }} - memory: {{ KubeDNS.MemoryRequest }} - args: [ "-conf", "/etc/coredns/Corefile" ] - volumeMounts: - - name: config-volume - mountPath: /etc/coredns - readOnly: true - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9153 - name: metrics - protocol: TCP - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - all - readOnlyRootFilesystem: true - livenessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - dnsPolicy: Default - volumes: - - name: config-volume - configMap: - name: coredns - items: - - key: Corefile - path: Corefile ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - annotations: - prometheus.io/port: "9153" - prometheus.io/scrape: "true" - labels: - k8s-addon: coredns.addons.k8s.io - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "CoreDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: {{ KubeDNS.ServerIP }} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - - name: metrics - port: 9153 - protocol: TCP diff --git a/upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template deleted file mode 100644 index af94478cab174..0000000000000 --- a/upup/models/cloudup/resources/addons/dns-controller.addons.k8s.io/k8s-1.6.yaml.template +++ /dev/null @@ -1,116 +0,0 @@ -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - name: dns-controller - namespace: kube-system - labels: - k8s-addon: dns-controller.addons.k8s.io - k8s-app: dns-controller - version: v1.19.0-alpha.5 -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: dns-controller - template: - metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - k8s-app: dns-controller - version: v1.19.0-alpha.5 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]' - spec: - tolerations: - - key: 
"node-role.kubernetes.io/master" - effect: NoSchedule - nodeSelector: - node-role.kubernetes.io/master: "" - dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) - hostNetwork: true - serviceAccount: dns-controller - containers: - - name: dns-controller - image: k8s.gcr.io/kops/dns-controller:1.19.0-alpha.5 - command: -{{ range $arg := DnsControllerArgv }} - - "{{ $arg }}" -{{ end }} -{{- if .EgressProxy }} - env: -{{ range $name, $value := ProxyEnv }} - - name: {{ $name }} - value: {{ $value }} -{{ end }} -{{- end }} -{{- if eq .CloudProvider "digitalocean" }} - env: - - name: DIGITALOCEAN_ACCESS_TOKEN - valueFrom: - secretKeyRef: - name: digitalocean - key: access-token -{{- end }} - resources: - requests: - cpu: 50m - memory: 50Mi - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: dns-controller - namespace: kube-system - labels: - k8s-addon: dns-controller.addons.k8s.io - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - name: kops:dns-controller -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - ingress - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - "extensions" - resources: - - ingresses - verbs: - - get - - list - - watch - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: dns-controller.addons.k8s.io - name: kops:dns-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kops:dns-controller -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: system:serviceaccount:kube-system:dns-controller diff --git a/upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template deleted file mode 100644 index 86cb33acd0a7d..0000000000000 --- a/upup/models/cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.6.yaml.template +++ /dev/null @@ -1,92 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: external-dns - namespace: kube-system - labels: - k8s-addon: external-dns.addons.k8s.io - k8s-app: external-dns - version: v0.4.4 -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: external-dns - template: - metadata: - labels: - k8s-addon: external-dns.addons.k8s.io - k8s-app: external-dns - version: v0.4.4 - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]' - spec: - serviceAccount: external-dns - tolerations: - - key: "node-role.kubernetes.io/master" - effect: NoSchedule - nodeSelector: - node-role.kubernetes.io/master: "" - dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns) - hostNetwork: true - containers: - - name: external-dns - image: registry.opensource.zalan.do/teapot/external-dns:v0.4.4 - args: -{{ range $arg := ExternalDnsArgv }} - - "{{ $arg }}" -{{ end }} - resources: - requests: - cpu: 50m - memory: 50Mi ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: external-dns - namespace: kube-system - labels: - k8s-addon: external-dns.addons.k8s.io - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: external-dns.addons.k8s.io - name: kops:external-dns -rules: -- apiGroups: - - "" - resources: 
- - services - verbs: - - list -- apiGroups: - - extensions - resources: - - ingresses - verbs: - - list - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: external-dns.addons.k8s.io - name: kops:external-dns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kops:external-dns -subjects: -- kind: ServiceAccount - name: external-dns - namespace: kube-system diff --git a/upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template deleted file mode 100644 index e4b9f78e65827..0000000000000 --- a/upup/models/cloudup/resources/addons/kube-dns.addons.k8s.io/k8s-1.6.yaml.template +++ /dev/null @@ -1,311 +0,0 @@ -# Copyright 2019 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -{{- if or (.KubeDNS.UpstreamNameservers) (.KubeDNS.StubDomains) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-dns - namespace: kube-system -data: - {{- if .KubeDNS.UpstreamNameservers }} - upstreamNameservers: | - {{ ToJSON .KubeDNS.UpstreamNameservers }} - {{- end }} - {{- if .KubeDNS.StubDomains }} - stubDomains: | - {{ ToJSON .KubeDNS.StubDomains }} - {{- end }} - ---- -{{- end }} - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - k8s-app: kube-dns-autoscaler - kubernetes.io/cluster-service: "true" -spec: - template: - metadata: - labels: - k8s-app: kube-dns-autoscaler - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - containers: - - name: autoscaler - image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.3 - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=kube-dns-autoscaler - # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base - - --target=Deployment/kube-dns - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. - - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}} - - --logtostderr=true - - --v=2 - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - serviceAccountName: kube-dns-autoscaler - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" -spec: - # replicas: not specified here: - # 1. In order to make Addon Manager do not reconcile this replicas parameter. - # 2. Default is 1. - # 3. 
Will be tuned in real time if DNS horizontal auto-scaling is turned on. - strategy: - rollingUpdate: - maxSurge: 10% - maxUnavailable: 0 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - # For 1.6, we keep the old tolerations in case of a downgrade to 1.5 - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - prometheus.io/scrape: 'true' - prometheus.io/port: '10055' - spec: - dnsPolicy: Default # Don't use cluster DNS. - serviceAccountName: kube-dns - volumes: - - name: kube-dns-config - configMap: - name: kube-dns - optional: true - - containers: - - name: kubedns - image: k8s.gcr.io/k8s-dns-kube-dns:1.15.13 - resources: - # TODO: Set memory limits when we've profiled the container for large - # clusters, then set request = limit to keep this container in - # guaranteed class. Currently, this container falls into the - # "burstable" category so the kubelet doesn't backoff from restarting it. - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - livenessProbe: - httpGet: - path: /healthcheck/kubedns - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /readiness - port: 8081 - scheme: HTTP - # we poll on pod startup for the Kubernetes master service and - # only setup the /readiness HTTP server once that's available. - initialDelaySeconds: 3 - timeoutSeconds: 5 - args: - - --config-dir=/kube-dns-config - - --dns-port=10053 - - --domain={{ KubeDNS.Domain }}. - - --v=2 - env: - - name: PROMETHEUS_PORT - value: "10055" - ports: - - containerPort: 10053 - name: dns-local - protocol: UDP - - containerPort: 10053 - name: dns-tcp-local - protocol: TCP - - containerPort: 10055 - name: metrics - protocol: TCP - volumeMounts: - - name: kube-dns-config - mountPath: /kube-dns-config - - - name: dnsmasq - image: k8s.gcr.io/k8s-dns-dnsmasq-nanny:1.15.13 - livenessProbe: - httpGet: - path: /healthcheck/dnsmasq - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - -v=2 - - -logtostderr - - -configDir=/etc/k8s/dns/dnsmasq-nanny - - -restartDnsmasq=true - - -- - - -k - - --cache-size={{ KubeDNS.CacheMaxSize }} - - --dns-forward-max={{ KubeDNS.CacheMaxConcurrent }} - - --no-negcache - - --log-facility=- - - --server=/{{ KubeDNS.Domain }}/127.0.0.1#10053 - - --server=/in-addr.arpa/127.0.0.1#10053 - - --server=/in6.arpa/127.0.0.1#10053 - - --min-port=1024 - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - # see: https://github.com/kubernetes/kubernetes/issues/29055 for details - resources: - requests: - cpu: 150m - memory: 20Mi - volumeMounts: - - name: kube-dns-config - mountPath: /etc/k8s/dns/dnsmasq-nanny - - - name: sidecar - image: k8s.gcr.io/k8s-dns-sidecar:1.15.13 - livenessProbe: - httpGet: - path: /metrics - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - --v=2 - - --logtostderr - - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A - - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ KubeDNS.Domain }},5,A - ports: - - containerPort: 10054 - name: metrics - protocol: TCP - resources: - requests: - memory: 20Mi - cpu: 10m - ---- - -apiVersion: v1 -kind: Service -metadata: - 
name: kube-dns - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - kubernetes.io/name: "KubeDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: {{ KubeDNS.ServerIP }} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-addon: kube-dns.addons.k8s.io - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: kube-dns.addons.k8s.io - name: kube-dns-autoscaler -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list"] - - apiGroups: [""] - resources: ["replicationcontrollers/scale"] - verbs: ["get", "update"] - - apiGroups: ["extensions"] - resources: ["deployments/scale", "replicasets/scale"] - verbs: ["get", "update"] -# Remove the configmaps rule once below issue is fixed: -# kubernetes-incubator/cluster-proportional-autoscaler#16 - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "create"] - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: kube-dns.addons.k8s.io - name: kube-dns-autoscaler -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-dns-autoscaler -subjects: -- kind: ServiceAccount - name: kube-dns-autoscaler - namespace: kube-system diff --git a/upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template b/upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template deleted file mode 100644 index 1f135e4bbd369..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.amazon-vpc-routed-eni/k8s-1.10.yaml.template +++ /dev/null @@ -1,132 +0,0 @@ -# Vendored from https://github.com/aws/amazon-vpc-cni-k8s/blob/v1.3.3/config/v1.3/aws-k8s-cni.yaml - -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: aws-node -rules: -- apiGroups: - - crd.k8s.amazonaws.com - resources: - - "*" - - namespaces - verbs: - - "*" -- apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: ["list", "watch", "get"] -- apiGroups: ["extensions"] - resources: - - daemonsets - verbs: ["list", "watch"] ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: aws-node - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: aws-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: aws-node -subjects: -- kind: ServiceAccount - name: aws-node - namespace: kube-system ---- -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: aws-node - namespace: kube-system - labels: - k8s-app: aws-node -spec: - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - k8s-app: aws-node - template: - metadata: - labels: - k8s-app: aws-node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - serviceAccountName: aws-node - hostNetwork: true - priorityClassName: system-node-critical - tolerations: - - operator: Exists - containers: - - image: "{{- or .Networking.AmazonVPC.ImageName "602401143452.dkr.ecr.us-west-2.amazonaws.com/amazon-k8s-cni:1.3.3" }}" - ports: - - containerPort: 61678 - name: metrics - name: aws-node - env: - - name: CLUSTER_NAME - value: {{ ClusterName }} - - name: AWS_VPC_K8S_CNI_LOGLEVEL - value: DEBUG - - name: MY_NODE_NAME - valueFrom: - fieldRef: 
- fieldPath: spec.nodeName - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- range .Networking.AmazonVPC.Env }} - - name: {{ .Name }} - value: "{{ .Value }}" - {{- end }} - resources: - requests: - cpu: 10m - securityContext: - privileged: true - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - - mountPath: /host/var/log - name: log-dir - - mountPath: /var/run/docker.sock - name: dockersock - volumes: - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: log-dir - hostPath: - path: /var/log - - name: dockersock - hostPath: - path: /var/run/docker.sock ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: eniconfigs.crd.k8s.amazonaws.com -spec: - scope: Cluster - group: crd.k8s.amazonaws.com - version: v1alpha1 - names: - plural: eniconfigs - singular: eniconfig - kind: ENIConfig diff --git a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template b/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template deleted file mode 100644 index 9f7a2692fff2a..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.cilium.io/k8s-1.7.yaml.template +++ /dev/null @@ -1,692 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: cilium-config - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" -data: -{{ with .Networking.Cilium }} - # Identity allocation mode selects how identities are shared between cilium - # nodes by setting how they are stored. The options are "crd" or "kvstore". - # - "crd" stores identities in kubernetes as CRDs (custom resource definition). - # These can be queried with: - # kubectl get ciliumid - # - "kvstore" stores identities in a kvstore, etcd or consul, that is - # configured below. Cilium versions before 1.6 supported only the kvstore - # backend. Upgrades from these older cilium versions should continue using - # the kvstore by commenting out the identity-allocation-mode below, or - # setting it to "kvstore". - identity-allocation-mode: crd - # If you want to run cilium in debug mode change this value to true - debug: "{{- if .Debug -}}true{{- else -}}false{{- end -}}" - {{ if .EnablePrometheusMetrics }} - # If you want metrics enabled in all of your Cilium agents, set the port for - # which the Cilium agents will have their metrics exposed. - # This option deprecates the "prometheus-serve-addr" in the - # "cilium-metrics-config" ConfigMap - # NOTE that this will open the port on ALL nodes where Cilium pods are - # scheduled. - prometheus-serve-addr: ":{{- or .AgentPrometheusPort "9090" }}" - {{ end }} - # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4 - # address. - enable-ipv4: "true" - # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6 - # address. - enable-ipv6: "false" - # If you want cilium monitor to aggregate tracing for packets, set this level - # to "low", "medium", or "maximum". The higher the level, the less packets - # that will be seen in monitor output. - monitor-aggregation: "{{- if eq .MonitorAggregation "" -}}medium{{- else -}}{{ .MonitorAggregation }}{{- end -}}" - # ct-global-max-entries-* specifies the maximum number of connections - # supported across all endpoints, split by protocol: tcp or other. 
One pair - # of maps uses these values for IPv4 connections, and another pair of maps - # use these values for IPv6 connections. - # - # If these values are modified, then during the next Cilium startup the - # tracking of ongoing connections may be disrupted. This may lead to brief - # policy drops or a change in loadbalancing decisions for a connection. - # - # For users upgrading from Cilium 1.2 or earlier, to minimize disruption - # during the upgrade process, comment out these options. - bpf-ct-global-tcp-max: "{{ .BPFCTGlobalTCPMax }}" - bpf-ct-global-any-max: "{{ .BPFCTGlobalAnyMax }}" - - # Pre-allocation of map entries allows per-packet latency to be reduced, at - # the expense of up-front memory allocation for the entries in the maps. The - # default value below will minimize memory usage in the default installation; - # users who are sensitive to latency may consider setting this to "true". - # - # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore - # this option and behave as though it is set to "true". - # - # If this value is modified, then during the next Cilium startup the restore - # of existing endpoints and tracking of ongoing connections may be disrupted. - # This may lead to policy drops or a change in loadbalancing decisions for a - # connection for some time. Endpoints may need to be recreated to restore - # connectivity. - # - # If this option is set to "false" during an upgrade from 1.3 or earlier to - # 1.4 or later, then it may cause one-time disruptions during the upgrade. - preallocate-bpf-maps: "{{ .PreallocateBPFMaps }}" - # Regular expression matching compatible Istio sidecar istio-proxy - # container image names - sidecar-istio-proxy-image: "{{ .SidecarIstioProxyImage }}" - # Encapsulation mode for communication between nodes - # Possible values: - # - disabled - # - vxlan (default) - # - geneve - tunnel: "{{ .Tunnel }}" - - # Name of the cluster. Only relevant when building a mesh of clusters. - cluster-name: "{{ .ClusterName }}" - - # This option is disabled by default starting from version 1.4.x in favor - # of a more powerful DNS proxy-based implementation, see [0] for details. - # Enable this option if you want to use FQDN policies but do not want to use - # the DNS proxy. - # - # To ease upgrade, users may opt to set this option to "true". - # Otherwise please refer to the Upgrade Guide [1] which explains how to - # prepare policy rules for upgrade. - # - # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based - # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action - tofqdns-enable-poller: "{{ .ToFqdnsEnablePoller }}" - # wait-bpf-mount makes init container wait until bpf filesystem is mounted - wait-bpf-mount: "false" - # Enable fetching of container-runtime specific metadata - # - # By default, the Kubernetes pod and namespace labels are retrieved and - # associated with endpoints for identification purposes. By integrating - # with the container runtime, container runtime specific labels can be - # retrieved, such labels will be prefixed with container: - # - # CAUTION: The container runtime labels can include information such as pod - # annotations which may result in each pod being associated a unique set of - # labels which can result in excessive security identities being allocated. - # Please review the labels filter when enabling container runtime labels. 
- # - # Supported values: - # - containerd - # - crio - # - docker - # - none - # - auto (automatically detect the container runtime) - # - container-runtime: "{{ .ContainerRuntimeLabels }}" - masquerade: "{{- if .DisableMasquerade -}}false{{- else -}}true{{- end -}}" - install-iptables-rules: "{{- if .IPTablesRulesNoinstall -}}false{{- else -}}true{{- end -}}" - auto-direct-node-routes: "{{- if .AutoDirectNodeRoutes -}}true{{- else -}}false{{- end -}}" - enable-node-port: "{{ .EnableNodePort }}" - {{ with .Ipam }} - ipam: {{ . }} - {{ if eq . "eni" }} - enable-endpoint-routes: "true" - auto-create-cilium-node-resource: "true" - blacklist-conflicting-routes: "false" - {{ end }} - {{ end }} -{{ end }} # With .Networking.Cilium end ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: cilium-operator - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - namespaces - - services - - nodes - - endpoints - - componentstatuses - verbs: - - get - - list - - watch -- apiGroups: - - "" - resources: - - pods - - nodes - verbs: - - get - - list - - watch - - update -- apiGroups: - - "" - resources: - - nodes - - nodes/status - verbs: - - patch -- apiGroups: - - extensions - resources: - - ingresses - verbs: - - create - - get - - list - - watch -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - create - - get - - list - - watch - - update -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumendpoints - - ciliumendpoints/status - - ciliumnodes - - ciliumnodes/status - - ciliumidentities - - ciliumidentities/status - verbs: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: cilium-operator - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - # to automatically delete [core|kube]dns pods so that are starting to being - # managed by Cilium - - pods - verbs: - - get - - list - - watch - - delete -- apiGroups: - - "" - resources: - # to automatically read from k8s and import the node's pod CIDR to cilium's - # etcd so all nodes know how to reach another pod running in a different - # node. 
- - nodes - # to perform the translation of a CNP that contains `ToGroup` to its endpoints - - services - - endpoints - # to check apiserver connectivity - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - cilium.io - resources: - - ciliumnetworkpolicies - - ciliumnetworkpolicies/status - - ciliumendpoints - - ciliumendpoints/status - - ciliumnodes - - ciliumnodes/status - - ciliumidentities - - ciliumidentities/status - verbs: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium -subjects: -- kind: ServiceAccount - name: cilium - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cilium-operator - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cilium-operator -subjects: -- kind: ServiceAccount - name: cilium-operator - namespace: kube-system ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - role.kubernetes.io/networking: "1" - name: cilium - namespace: kube-system -spec: - selector: - matchLabels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - template: - metadata: - annotations: - # This annotation plus the CriticalAddonsOnly toleration makes - # cilium to be a critical pod in the cluster, which ensures cilium - # gets priority scheduling. - # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/ - scheduler.alpha.kubernetes.io/critical-pod: "" - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]' - labels: - k8s-app: cilium - kubernetes.io/cluster-service: "true" - spec: - containers: - - args: - - --config-dir=/tmp/cilium/config-map - command: - - cilium-agent - env: - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: CILIUM_FLANNEL_MASTER_DEVICE - valueFrom: - configMapKeyRef: - key: flannel-master-device - name: cilium-config - optional: true - - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT - valueFrom: - configMapKeyRef: - key: flannel-uninstall-on-exit - name: cilium-config - optional: true - - name: CILIUM_CLUSTERMESH_CONFIG - value: /var/lib/cilium/clustermesh/ - - name: CILIUM_CNI_CHAINING_MODE - valueFrom: - configMapKeyRef: - key: cni-chaining-mode - name: cilium-config - optional: true - - name: CILIUM_CUSTOM_CNI_CONF - valueFrom: - configMapKeyRef: - key: custom-cni-conf - name: cilium-config - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ .MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" - {{ with .Networking.Cilium.EnablePolicy }} - - name: CILIUM_ENABLE_POLICY - value: {{ . 
}} - {{ end }} -{{ with .Networking.Cilium }} - image: "docker.io/cilium/cilium:{{ .Version }}" - imagePullPolicy: IfNotPresent - lifecycle: - postStart: - exec: - command: - - /cni-install.sh - preStop: - exec: - command: - - /cni-uninstall.sh - livenessProbe: - exec: - command: - - cilium - - status - - --brief - failureThreshold: 10 - # The initial delay for the liveness probe is intentionally large to - # avoid an endless kill & restart cycle if in the event that the initial - # bootstrapping takes longer than expected. - initialDelaySeconds: 120 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - name: cilium-agent - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: {{ .AgentPrometheusPort }} - hostPort: {{ .AgentPrometheusPort }} - name: prometheus - protocol: TCP - {{ end }} - readinessProbe: - exec: - command: - - cilium - - status - - --brief - failureThreshold: 3 - initialDelaySeconds: 5 - periodSeconds: 30 - successThreshold: 1 - timeoutSeconds: 5 - securityContext: - capabilities: - add: - - NET_ADMIN - - SYS_MODULE - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - - mountPath: /var/run/cilium - name: cilium-run - - mountPath: /host/opt/cni/bin - name: cni-path - - mountPath: /host/etc/cni/net.d - name: etc-cni-netd - - mountPath: /var/lib/cilium/clustermesh - name: clustermesh-secrets - readOnly: true - - mountPath: /tmp/cilium/config-map - name: cilium-config-path - readOnly: true - # Needed to be able to load kernel modules - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - hostNetwork: true - initContainers: - - command: - - /init-container.sh - env: - - name: CILIUM_ALL_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-state - name: cilium-config - optional: true - - name: CILIUM_BPF_STATE - valueFrom: - configMapKeyRef: - key: clean-cilium-bpf-state - name: cilium-config - optional: true - - name: CILIUM_WAIT_BPF_MOUNT - valueFrom: - configMapKeyRef: - key: wait-bpf-mount - name: cilium-config - optional: true - image: "docker.io/cilium/cilium:{{ .Version }}" -## end of `with .Networking.Cilium` -#{{ end }} - imagePullPolicy: IfNotPresent - name: clean-cilium-state - securityContext: - capabilities: - add: - - NET_ADMIN - privileged: true - volumeMounts: - - mountPath: /sys/fs/bpf - name: bpf-maps - - mountPath: /var/run/cilium - name: cilium-run - restartPolicy: Always - serviceAccount: cilium - serviceAccountName: cilium - terminationGracePeriodSeconds: 1 - tolerations: - - operator: Exists - volumes: - # To keep state between restarts / upgrades - - hostPath: - path: /var/run/cilium - type: DirectoryOrCreate - name: cilium-run - # To keep state between restarts / upgrades for bpf maps - - hostPath: - path: /sys/fs/bpf - type: DirectoryOrCreate - name: bpf-maps - # To install cilium cni plugin in the host - - hostPath: - path: /opt/cni/bin - type: DirectoryOrCreate - name: cni-path - # To install cilium cni configuration in the host - - hostPath: - path: /etc/cni/net.d - type: DirectoryOrCreate - name: etc-cni-netd - # To be able to load kernel modules - - hostPath: - path: /lib/modules - name: lib-modules - # To access iptables concurrently with other processes (e.g. 
kube-proxy) - - hostPath: - path: /run/xtables.lock - type: FileOrCreate - name: xtables-lock - # To read the clustermesh configuration - - name: clustermesh-secrets - secret: - defaultMode: 420 - optional: true - secretName: cilium-clustermesh - # To read the configuration from the config map - - configMap: - name: cilium-config - name: cilium-config-path - updateStrategy: - rollingUpdate: - maxUnavailable: 2 - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - io.cilium/app: operator - name: cilium-operator - role.kubernetes.io/networking: "1" - name: cilium-operator - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - io.cilium/app: operator - name: cilium-operator - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - io.cilium/app: operator - name: cilium-operator - spec: - containers: - - args: - - --debug=$(CILIUM_DEBUG) -{{ with .Networking.Cilium }} - {{ if .EnablePrometheusMetrics }} - - --enable-metrics - {{ end }} -{{ end }} - command: - - cilium-operator - env: - - name: CILIUM_K8S_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - - name: K8S_NODE_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: CILIUM_DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: cilium-config - optional: true - - name: CILIUM_CLUSTER_NAME - valueFrom: - configMapKeyRef: - key: cluster-name - name: cilium-config - optional: true - - name: CILIUM_CLUSTER_ID - valueFrom: - configMapKeyRef: - key: cluster-id - name: cilium-config - optional: true - - name: CILIUM_IPAM - valueFrom: - configMapKeyRef: - key: ipam - name: cilium-config - optional: true - - name: CILIUM_DISABLE_ENDPOINT_CRD - valueFrom: - configMapKeyRef: - key: disable-endpoint-crd - name: cilium-config - optional: true - - name: CILIUM_KVSTORE - valueFrom: - configMapKeyRef: - key: kvstore - name: cilium-config - optional: true - - name: CILIUM_KVSTORE_OPT - valueFrom: - configMapKeyRef: - key: kvstore-opt - name: cilium-config - optional: true - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - key: AWS_ACCESS_KEY_ID - name: cilium-aws - optional: true - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - key: AWS_SECRET_ACCESS_KEY - name: cilium-aws - optional: true - - name: AWS_DEFAULT_REGION - valueFrom: - secretKeyRef: - key: AWS_DEFAULT_REGION - name: cilium-aws - optional: true - - name: KUBERNETES_SERVICE_HOST - value: "{{ .MasterInternalName }}" - - name: KUBERNETES_SERVICE_PORT - value: "443" -{{ with .Networking.Cilium }} - image: "docker.io/cilium/operator:{{ .Version }}" - imagePullPolicy: IfNotPresent - name: cilium-operator - {{ if .EnablePrometheusMetrics }} - ports: - - containerPort: 6942 - hostPort: 6942 - name: prometheus - protocol: TCP - {{ end }} - livenessProbe: - httpGet: - path: /healthz - port: 9234 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - timeoutSeconds: 3 - hostNetwork: true - restartPolicy: Always - serviceAccount: cilium-operator - serviceAccountName: cilium-operator - {{if eq .Ipam "eni" }} - nodeSelector: - node-role.kubernetes.io/master: "" - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - - effect: NoExecute - key: node.kubernetes.io/not-ready - operator: Exists - tolerationSeconds: 300 - - effect: NoExecute - key: node.kubernetes.io/unreachable - operator: Exists - tolerationSeconds: 300 - {{ end }} -{{ end }} \ No newline at end of file diff 
--git a/upup/models/cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template deleted file mode 100644 index c542ecfb30932..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.flannel/k8s-1.6.yaml.template +++ /dev/null @@ -1,163 +0,0 @@ -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: flannel - namespace: kube-system ---- -kind: ServiceAccount -apiVersion: v1 -metadata: - name: flannel - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-flannel-cfg - namespace: kube-system - labels: - k8s-app: flannel - role.kubernetes.io/networking: "1" -data: - cni-conf.json: | - { - "name": "cbr0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "forceAddress": true, - "isDefaultGateway": true, - "hairpinMode": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "{{ FlannelBackendType }}" - } - } ---- -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: kube-flannel-ds - namespace: kube-system - labels: - k8s-app: flannel - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - tier: node - app: flannel - role.kubernetes.io/networking: "1" - spec: - hostNetwork: true - nodeSelector: - beta.kubernetes.io/arch: amd64 - serviceAccountName: flannel - tolerations: - - operator: Exists - initContainers: - - name: install-cni - image: quay.io/coreos/flannel:v0.11.0-amd64 - command: - - cp - args: - - -f - - /etc/kube-flannel/cni-conf.json - - /etc/cni/net.d/10-flannel.conflist - volumeMounts: - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - containers: - - name: kube-flannel - image: quay.io/coreos/flannel:v0.11.0-amd64 - command: - - "/opt/bin/flanneld" - - "--ip-masq" - - "--kube-subnet-mgr" - - "--iptables-resync={{- or .Networking.Flannel.IptablesResyncSeconds "5" }}" - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - resources: - limits: - memory: 100Mi - requests: - cpu: 100m - memory: 100Mi - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - - name: run - hostPath: - path: /run - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg diff --git a/upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml b/upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml deleted file mode 100644 index 5df99fb0462b1..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.kope.io/k8s-1.6.yaml +++ /dev/null @@ -1,104 +0,0 @@ -apiVersion: 
extensions/v1beta1 -kind: DaemonSet -metadata: - name: kopeio-networking-agent - namespace: kube-system - labels: - k8s-addon: networking.kope.io - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - name: kopeio-networking-agent - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' - spec: - hostPID: true - hostIPC: true - hostNetwork: true - containers: - - resources: - requests: - cpu: 50m - memory: 100Mi - limits: - memory: 100Mi - securityContext: - privileged: true - image: kopeio/networking-agent:1.0.20181028 - name: networking-agent - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - readOnly: true - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - serviceAccountName: kopeio-networking-agent - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: lib-modules - hostPath: - path: /lib/modules - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kopeio-networking-agent - namespace: kube-system - labels: - k8s-addon: networking.kope.io - role.kubernetes.io/networking: "1" - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - labels: - k8s-addon: networking.kope.io - name: kopeio:networking-agent -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - patch -- apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch - ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - labels: - k8s-addon: networking.kope.io - name: kopeio:networking-agent -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kopeio:networking-agent -subjects: -- apiGroup: rbac.authorization.k8s.io - kind: User - name: system:serviceaccount:kube-system:kopeio-networking-agent diff --git a/upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template b/upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template deleted file mode 100644 index 67fdbc47bc3f8..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.kuberouter/k8s-1.6.yaml.template +++ /dev/null @@ -1,160 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-router-cfg - namespace: kube-system - labels: - tier: node - k8s-app: kube-router -data: - cni-conf.json: | - { - "name":"kubernetes", - "type":"bridge", - "bridge":"kube-bridge", - "isDefaultGateway":true, - "ipam": { - "type":"host-local" - } - } ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - labels: - k8s-app: kube-router - tier: node - name: kube-router - namespace: kube-system -spec: - template: - metadata: - labels: - k8s-app: kube-router - tier: node - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - containers: - - name: kube-router - image: cloudnativelabs/kube-router:v0.3.1 - args: - - --run-router=true - - --run-firewall=true - - --run-service-proxy=true - - --metrics-port=12013 - - --kubeconfig=/var/lib/kube-router/kubeconfig - env: - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - livenessProbe: - httpGet: - path: /healthz - port: 20244 - initialDelaySeconds: 10 - periodSeconds: 3 - resources: - requests: - cpu: 100m - memory: 250Mi - securityContext: - privileged: true - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - 
readOnly: true - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kubeconfig - mountPath: /var/lib/kube-router/kubeconfig - readOnly: true - initContainers: - - name: install-cni - image: busybox - command: - - /bin/sh - - -c - - set -e -x; - if [ ! -f /etc/cni/net.d/10-kuberouter.conf ]; then - TMP=/etc/cni/net.d/.tmp-kuberouter-cfg; - cp /etc/kube-router/cni-conf.json ${TMP}; - mv ${TMP} /etc/cni/net.d/10-kuberouter.conf; - fi - volumeMounts: - - name: cni-conf-dir - mountPath: /etc/cni/net.d - - name: kube-router-cfg - mountPath: /etc/kube-router - hostNetwork: true - serviceAccountName: kube-router - tolerations: - - operator: Exists - volumes: - - hostPath: - path: /lib/modules - name: lib-modules - - hostPath: - path: /etc/cni/net.d - name: cni-conf-dir - - name: kubeconfig - hostPath: - path: /var/lib/kube-router/kubeconfig - - name: kube-router-cfg - configMap: - name: kube-router-cfg ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-router - namespace: kube-system ---- -# Kube-router roles -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kube-router - namespace: kube-system -rules: - - apiGroups: [""] - resources: - - namespaces - - pods - - services - - nodes - - endpoints - verbs: - - get - - list - - watch - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kube-router -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kube-router -subjects: -- kind: ServiceAccount - name: kube-router - namespace: kube-system -- kind: User - name: system:kube-router diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template deleted file mode 100644 index c448162292f0b..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org.canal/k8s-1.9.yaml.template +++ /dev/null @@ -1,543 +0,0 @@ -# Canal Version v3.2.3 -# https://docs.projectcalico.org/v3.2/releases#v3.2.3 -# This manifest includes the following component versions: -# calico/node:v3.2.3 -# calico/cni:v3.2.3 -# coreos/flannel:v0.9.0 - -# This ConfigMap is used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "__KUBECONFIG_FILEPATH__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # Flannel network configuration. 
Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{ .NonMasqueradeCIDR }}", - "Backend": { - "Type": "vxlan" - } - } - ---- - - - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: apps/v1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - nodeSelector: - beta.kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Make sure canal gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: canal - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v3.2.3 - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # No IP address needed. - - name: IP - value: "" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Canal.LogSeveritySys "INFO" }}" - # Set Felix endpoint to host default action to ACCEPT. 
- - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "{{- or .Networking.Canal.DefaultEndpointToHostAction "ACCEPT" }}" - # Controls whether Felix inserts rules to the top of iptables chains, or appends to the bottom - - name: FELIX_CHAININSERTMODE - value: "{{- or .Networking.Canal.ChainInsertMode "insert" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Canal.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Canal.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - host: localhost - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v3.2.3 - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. - - name: kube-flannel - image: quay.io/coreos/flannel:v0.9.0 - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used by flannel. - - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: calico -rules: - - apiGroups: [""] - resources: - - namespaces - - serviceaccounts - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - patch - - apiGroups: [""] - resources: - - services - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - verbs: - - get - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - globalnetworksets - - hostendpoints - - bgpconfigurations - - ippools - - globalnetworkpolicies - - networkpolicies - - clusterinformations - verbs: - - create - - get - - list - - update - - watch - ---- - -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- - -# Bind the flannel ClusterRole to the canal ServiceAccount. -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -# Bind the ClusterRole to the canal ServiceAccount. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -# Create all the CustomResourceDefinitions needed for -# Calico policy and networking mode. 
- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template deleted file mode 100644 index 120cf6342b0aa..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7-v3.yaml.template +++ /dev/null @@ -1,749 +0,0 @@ -{{- $etcd_scheme := EtcdScheme }} -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The calico-etcd PetSet service IP:port - etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} - {{- range $j, $member := $cluster.Members -}} - {{- if $j }},{{ end -}} - {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001 - {{- end }}" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.1", - "plugins": [ - { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - {{- if eq $etcd_scheme "https" }} - "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem", - "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem", - "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem", - "etcd_scheme": "https", - {{- end }} - "log_level": "info", - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - "" - - extensions - resources: - - pods - - namespaces - - networkpolicies - - nodes - verbs: - - watch - - list - - apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - watch - - list ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 
- terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: calico/node:v3.8.0 - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within `--cluster-cidr`. - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}CrossSubnet{{- else -}}Always{{- end -}}" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to the desired level - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - # Auto-detect the BGP IP address. 
- - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 10m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -bird-ready - - -felix-ready - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: calico/cni:v3.8.0 - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - resources: - requests: - cpu: 10m - initContainers: - - name: migrate - image: calico/upgrade:v1.0.5 - command: ['/bin/sh', '-c', '/node-init-container.sh'] - env: - - name: CALICO_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - - name: CALICO_APIV1_DATASTORE_TYPE - value: "etcdv2" - - name: CALICO_APIV1_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: CALICO_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_ETCD_CA_CERT_FILE - value: /certs/ca.pem - - name: CALICO_APIV1_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_APIV1_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_APIV1_ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Necessary for gossip based DNS - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - ---- - -# This manifest deploys the Calico Kubernetes controllers. 
-# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' -spec: - # The controllers can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - spec: - # The controllers must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule - serviceAccountName: calico-kube-controllers - containers: - - name: calico-kube-controllers - image: calico/kube-controllers:v3.8.0 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - volumeMounts: - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - readinessProbe: - exec: - command: - - /usr/bin/check-status - - -r - initContainers: - - name: migrate - image: calico/upgrade:v1.0.5 - command: ['/bin/sh', '-c', '/controller-init.sh'] - env: - - name: CALICO_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - - name: CALICO_APIV1_DATASTORE_TYPE - value: "etcdv2" - - name: CALICO_APIV1_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: CALICO_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_ETCD_CA_CERT_FILE - value: /certs/ca.pem - - name: CALICO_APIV1_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_APIV1_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_APIV1_ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - # Necessary for gossip based DNS - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -# This manifest runs the Migration complete container that monitors for the -# completion of the calico-node Daemonset rollout and when it finishes -# successfully rolling out it will mark the migration complete and allow pods -# to be created again. 
---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-upgrade-job - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-upgrade-job - labels: - role.kubernetes.io/networking: "1" -rules: - - apiGroups: - - extensions - resources: - - daemonsets - - daemonsets/status - verbs: - - get - - list - - watch ---- - -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-upgrade-job - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-upgrade-job -subjects: -- kind: ServiceAccount - name: calico-upgrade-job - namespace: kube-system ---- -# If anything in this job is changed then the name of the job -# should be changed because Jobs cannot be updated, so changing -# the name would run a different Job if the previous version had been -# created before and it does not hurt to rerun this job. - -apiVersion: batch/v1 -kind: Job -metadata: - name: calico-complete-upgrade-v331 - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" -spec: - template: - metadata: - labels: - role.kubernetes.io/networking: "1" - spec: - hostNetwork: true - serviceAccountName: calico-upgrade-job - restartPolicy: OnFailure - containers: - - name: migrate-completion - image: calico/upgrade:v1.0.5 - command: ['/bin/sh', '-c', '/completion-job.sh'] - env: - - name: EXPECTED_NODE_IMAGE - value: quay.io/calico/node:v3.7.4 - # The location of the Calico etcd cluster. - - name: CALICO_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - - name: CALICO_APIV1_DATASTORE_TYPE - value: "etcdv2" - - name: CALICO_APIV1_ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: CALICO_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_ETCD_CA_CERT_FILE - value: /certs/ca.pem - - name: CALICO_APIV1_ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: CALICO_APIV1_ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: CALICO_APIV1_ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} -# This manifest installs the k8s-ec2-srcdst container, which disables -# src/dst ip checks to allow BGP to function for calico for hosts within subnets -# This only applies for AWS environments. 
---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.1 - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" -{{- end -}} diff --git a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template b/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template deleted file mode 100644 index 2aeaa567b7bc5..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.projectcalico.org/k8s-1.7.yaml.template +++ /dev/null @@ -1,550 +0,0 @@ -{{- $etcd_scheme := EtcdScheme }} -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # The calico-etcd PetSet service IP:port - etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} - {{- range $j, $member := $cluster.Members -}} - {{- if $j }},{{ end -}} - {{ $etcd_scheme }}://etcd-{{ $member.Name }}.internal.{{ ClusterName }}:4001 - {{- end }}" - - # Configure the Calico backend to use. - calico_backend: "bird" - - # The CNI network configuration to install on each node. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "etcd_endpoints": "__ETCD_ENDPOINTS__", - {{- if eq $etcd_scheme "https" }} - "etcd_ca_cert_file": "/srv/kubernetes/calico/ca.pem", - "etcd_cert_file": "/srv/kubernetes/calico/calico-client.pem", - "etcd_key_file": "/srv/kubernetes/calico/calico-client-key.pem", - "etcd_scheme": "https", - {{- end }} - "log_level": "info", - {{- if .Networking.Calico.MTU }} - "mtu": {{- or .Networking.Calico.MTU }}, - {{- end }} - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s", - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: calico - namespace: kube-system - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - template: - metadata: - labels: - k8s-app: calico-node - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - serviceAccountName: calico - tolerations: - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: quay.io/calico/node:v2.6.12 - resources: - requests: - cpu: 10m - env: - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - # Enable BGP. Disable to enforce policy only. 
- - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Configure the IP Pool from which Pod IPs will be chosen. - - name: CALICO_IPV4POOL_CIDR - value: "{{ .KubeControllerManager.ClusterCIDR }}" - - name: CALICO_IPV4POOL_IPIP - value: "{{- if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}cross-subnet{{- else -}}always{{- end -}}" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "kops,bgp" - # Disable file logging so `kubectl logs` works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set noderef for node controller. - - name: CALICO_K8S_NODE_REF - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Auto-detect the BGP IP address. - - name: IP - value: "" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to the desired level - - name: FELIX_LOGSEVERITYSCREEN - value: "{{- or .Networking.Calico.LogSeverityScreen "info" }}" - # Set to enable the experimental Prometheus metrics server - - name: FELIX_PROMETHEUSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusMetricsEnabled "false" }}" - # TCP port that the Prometheus metrics server should bind to - - name: FELIX_PROMETHEUSMETRICSPORT - value: "{{- or .Networking.Calico.PrometheusMetricsPort "9091" }}" - # Enable Prometheus Go runtime metrics collection - - name: FELIX_PROMETHEUSGOMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusGoMetricsEnabled "true" }}" - # Enable Prometheus process metrics collection - - name: FELIX_PROMETHEUSPROCESSMETRICSENABLED - value: "{{- or .Networking.Calico.PrometheusProcessMetricsEnabled "true" }}" - - name: FELIX_HEALTHENABLED - value: "true" - {{- if .Networking.Calico.MTU }} - - name: FELIX_IPINIPMTU - value: "{{- or .Networking.Calico.MTU }}" - {{- end}} - securityContext: - privileged: true - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: quay.io/calico/cni:v1.11.8 - resources: - requests: - cpu: 10m - imagePullPolicy: Always - command: ["/install-cni.sh"] - env: - # The name of calico config file - - name: CNI_CONF_NAME - value: 10-calico.conflist - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - # Used to install CNI. 
- - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - ---- - -# This manifest deploys the Calico Kubernetes controllers. -# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" -spec: - # The controllers can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - # The controllers must run in the host network namespace so that - # it isn't governed by policy that would prevent it from working. - hostNetwork: true - serviceAccountName: calico - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - containers: - - name: calico-kube-controllers - image: quay.io/calico/kube-controllers:v1.0.5 - resources: - requests: - cpu: 10m - env: - # By default only policy, profile, workloadendpoint are turned - # on, node controller will decommission nodes that do not exist anymore - # this and CALICO_K8S_NODE_REF in calico-node fixes #3224, but invalid nodes that are - # already registered in calico needs to be deleted manually, see - # https://docs.projectcalico.org/v2.6/usage/decommissioning-a-node - - name: ENABLED_CONTROLLERS - value: policy,profile,workloadendpoint,node - # The location of the Calico etcd cluster. - - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{- end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} ---- - -# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then -# be removed entirely once the new kube-controllers deployment has been deployed above. -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy -spec: - # Turn this deployment off in favor of the kube-controllers deployment above. - replicas: 0 - strategy: - type: Recreate - template: - metadata: - name: calico-policy-controller - namespace: kube-system - labels: - k8s-app: calico-policy - spec: - hostNetwork: true - serviceAccountName: calico - containers: - - name: calico-policy-controller - # This shouldn't get updated, since this is the last version we shipped that should be used. - image: quay.io/calico/kube-policy-controller:v0.7.0 - env: - # The location of the Calico etcd cluster. 
- - name: ETCD_ENDPOINTS - valueFrom: - configMapKeyRef: - name: calico-config - key: etcd_endpoints - {{- if eq $etcd_scheme "https" }} - - name: ETCD_CERT_FILE - value: /certs/calico-client.pem - - name: ETCD_KEY_FILE - value: /certs/calico-client-key.pem - - name: ETCD_CA_CERT_FILE - value: /certs/ca.pem - {{- end }} - volumeMounts: - # Necessary for gossip based DNS - - mountPath: /etc/hosts - name: etc-hosts - readOnly: true - {{- if eq $etcd_scheme "https" }} - - mountPath: /certs - name: calico - readOnly: true - {{ end }} - volumes: - - name: etc-hosts - hostPath: - path: /etc/hosts - {{- if eq $etcd_scheme "https" }} - - name: calico - hostPath: - path: /srv/kubernetes/calico - {{- end }} - -{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} -# This manifest installs the k8s-ec2-srcdst container, which disables -# src/dst ip checks to allow BGP to function for calico for hosts within subnets -# This only applies for AWS environments. ---- - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -rules: -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch - - update - - patch - ---- - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - role.kubernetes.io/networking: "1" ---- - -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: k8s-ec2-srcdst - labels: - role.kubernetes.io/networking: "1" -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: k8s-ec2-srcdst -subjects: -- kind: ServiceAccount - name: k8s-ec2-srcdst - namespace: kube-system - ---- - -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: k8s-ec2-srcdst - namespace: kube-system - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: k8s-ec2-srcdst - template: - metadata: - labels: - k8s-app: k8s-ec2-srcdst - role.kubernetes.io/networking: "1" - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - - key: CriticalAddonsOnly - operator: Exists - serviceAccountName: k8s-ec2-srcdst - containers: - - image: ottoyiu/k8s-ec2-srcdst:v0.2.2 - name: k8s-ec2-srcdst - resources: - requests: - cpu: 10m - memory: 64Mi - env: - - name: AWS_REGION - value: {{ Region }} - volumeMounts: - - name: ssl-certs - mountPath: "/etc/ssl/certs/ca-certificates.crt" - readOnly: true - imagePullPolicy: "Always" - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs/ca-certificates.crt" - nodeSelector: - node-role.kubernetes.io/master: "" -{{- end -}} diff --git a/upup/models/cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template b/upup/models/cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template deleted file mode 100644 index 1b0c0c74a6dcb..0000000000000 --- a/upup/models/cloudup/resources/addons/networking.weave/k8s-1.9.yaml.template +++ /dev/null @@ -1,284 +0,0 @@ -# Pulled and modified from: https://github.com/weaveworks/weave/releases/download/v2.7.0/weave-daemonset-k8s-1.9.yaml - -{{- if WeaveSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: weave-net - namespace: kube-system -stringData: - network-password: {{ WeaveSecret }} ---- -{{- end }} - -apiVersion: v1 -kind: ServiceAccount -metadata: - name: weave-net - labels: - name: weave-net - namespace: 
kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: weave-net - labels: - name: weave-net - role.kubernetes.io/networking: "1" - namespace: kube-system -rules: - - apiGroups: - - '' - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch - - apiGroups: - - extensions - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - 'networking.k8s.io' - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - '' - resources: - - nodes/status - verbs: - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: weave-net - labels: - name: weave-net - role.kubernetes.io/networking: "1" - namespace: kube-system -roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net -rules: - - apiGroups: - - '' - resources: - - configmaps - resourceNames: - - weave-net - verbs: - - get - - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: weave-net - namespace: kube-system - labels: - name: weave-net -roleRef: - kind: Role - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: weave-net - labels: - name: weave-net - role.kubernetes.io/networking: "1" - namespace: kube-system -spec: - # Wait 5 seconds to let pod connect before rolling next pod - selector: - matchLabels: - name: weave-net - minReadySeconds: 5 - template: - metadata: - labels: - name: weave-net - role.kubernetes.io/networking: "1" - annotations: - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - containers: - - name: weave - command: - - /home/weave/launch.sh - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: {{ .KubeControllerManager.ClusterCIDR }} - {{- if .Networking.Weave.MTU }} - - name: WEAVE_MTU - value: "{{ .Networking.Weave.MTU }}" - {{- end }} - {{- if .Networking.Weave.NoMasqLocal }} - - name: NO_MASQ_LOCAL - value: "{{ .Networking.Weave.NoMasqLocal }}" - {{- end }} - {{- if .Networking.Weave.ConnLimit }} - - name: CONN_LIMIT - value: "{{ .Networking.Weave.ConnLimit }}" - {{- end }} - {{- if .Networking.Weave.NetExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NetExtraArgs }}" - {{- end }} - {{- if WeaveSecret }} - - name: WEAVE_PASSWORD - valueFrom: - secretKeyRef: - name: weave-net - key: network-password - {{- end }} - image: 'weaveworks/weave-kube:2.7.0' - ports: - - name: metrics - containerPort: 6782 - readinessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - resources: - requests: - cpu: {{ or .Networking.Weave.CPURequest "50m" }} - memory: {{ or .Networking.Weave.MemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.CPULimit }} - cpu: {{ .Networking.Weave.CPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.MemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: 
/host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - - name: weave-npc - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - {{- if .Networking.Weave.NPCExtraArgs }} - - name: EXTRA_ARGS - value: "{{ .Networking.Weave.NPCExtraArgs }}" - {{- end }} - image: 'weaveworks/weave-npc:2.7.0' - ports: - - name: metrics - containerPort: 6781 - resources: - requests: - cpu: {{ or .Networking.Weave.NPCCPURequest "50m" }} - memory: {{ or .Networking.Weave.NPCMemoryRequest "200Mi" }} - limits: - {{- if .Networking.Weave.NPCCPULimit }} - cpu: {{ .Networking.Weave.NPCCPULimit }} - {{- end }} - memory: {{ or .Networking.Weave.NPCMemoryLimit "200Mi" }} - securityContext: - privileged: true - volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - - key: CriticalAddonsOnly - operator: Exists - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - updateStrategy: - type: RollingUpdate diff --git a/upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template b/upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template deleted file mode 100644 index 4b14d2aa72e92..0000000000000 --- a/upup/models/cloudup/resources/addons/node-authorizer.addons.k8s.io/k8s-1.10.yaml.template +++ /dev/null @@ -1,188 +0,0 @@ -{{- $proxy := .EgressProxy }} -{{- $na := .NodeAuthorization.NodeAuthorizer }} -{{- $name := "node-authorizer" }} -{{- $namespace := "kube-system" }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ $name }} - namespace: {{ $namespace }} - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kops:{{ $name }}:nodes-viewer - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -rules: -- apiGroups: - - "*" - resources: - - nodes - verbs: - - get - - list ---- -# permits the node access to create a CSR -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kops:{{ $name }}:system:bootstrappers - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -roleRef: - kind: ClusterRole - name: system:node-bootstrapper - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: Group - name: system:bootstrappers - apiGroup: rbac.authorization.k8s.io ---- -# indicates to the controller to auto-sign the CSR for this group -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: kops:{{ $name }}:approval - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -roleRef: - kind: ClusterRole - name: system:certificates.k8s.io:certificatesigningrequests:nodeclient - apiGroup: rbac.authorization.k8s.io -subjects: -- 
kind: Group - name: system:bootstrappers - apiGroup: rbac.authorization.k8s.io ---- -# the service permission requires to create the bootstrap tokens -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: kops:{{ $namespace }}:{{ $name }} - namespace: {{ $namespace }} - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -rules: -- apiGroups: - - "*" - resources: - - secrets - verbs: - - create - - list ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: kops:{{ $namespace }}:{{ $name }} - namespace: {{ $namespace }} - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: kops:{{ $namespace }}:{{ $name }} -subjects: -- kind: ServiceAccount - name: {{ $name }} - namespace: {{ $namespace }} ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: kops:{{ $name }}:nodes-viewer - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: kops:{{ $name }}:nodes-viewer -subjects: -- kind: ServiceAccount - name: {{ $name }} - namespace: {{ $namespace }} ---- -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: {{ $name }} - namespace: {{ $namespace }} - labels: - k8s-app: {{ $name }} - k8s-addon: {{ $name }}.addons.k8s.io -spec: - selector: - matchLabels: - k8s-app: {{ $name }} - template: - metadata: - labels: - k8s-app: {{ $name }} - annotations: - dns.alpha.kubernetes.io/internal: {{ $name }}-internal.{{ ClusterName }} - prometheus.io/port: "{{ $na.Port }}" - prometheus.io/scheme: "https" - prometheus.io/scrape: "true" - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - hostNetwork: true - nodeSelector: - kubernetes.io/role: master - serviceAccount: {{ $name }} - securityContext: - fsGroup: 1000 - tolerations: - - key: "node-role.kubernetes.io/master" - effect: NoSchedule - volumes: - - name: config - hostPath: - path: /srv/kubernetes/node-authorizer - type: DirectoryOrCreate - containers: - - name: {{ $name }} - image: {{ $na.Image }} - args: - - server - - --authorization-timeout={{ $na.Timeout.Duration }} - - --authorizer={{ $na.Authorizer }} - - --cluster-name={{ ClusterName }} - {{- range $na.Features }} - - --feature={{ . 
}} - {{- end }} - - --listen=0.0.0.0:{{ $na.Port }} - - --tls-cert=/config/tls.pem - - --tls-client-ca=/config/ca.pem - - --tls-private-key=/config/tls-key.pem - - --token-ttl={{ $na.TokenTTL.Duration }} - {{- if $proxy }} - env: - - name: http_proxy - value: {{ $proxy.HTTPProxy.Host }}:{{ $proxy.HTTPProxy.Port }} - {{- if $proxy.ProxyExcludes }} - - name: no_proxy - value: {{ $proxy.ProxyExcludes }} - {{- end }} - {{- end }} - resources: - limits: - cpu: 100m - memory: 64Mi - requests: - cpu: 10m - memory: 10Mi - volumeMounts: - - mountPath: /config - readOnly: true - name: config diff --git a/upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template b/upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template deleted file mode 100644 index 49dac22493ec3..0000000000000 --- a/upup/models/cloudup/resources/addons/podsecuritypolicy.addons.k8s.io/k8s-1.10.yaml.template +++ /dev/null @@ -1,82 +0,0 @@ ---- -apiVersion: extensions/v1beta1 -kind: PodSecurityPolicy -metadata: - annotations: - k8s-addon: podsecuritypolicy.addons.k8s.io - name: kube-system -spec: - allowedCapabilities: - - '*' - fsGroup: - rule: RunAsAny - hostPID: true - hostIPC: true - hostNetwork: true - hostPorts: - - min: 1 - max: 65536 - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - annotations: - k8s-addon: podsecuritypolicy.addons.k8s.io - name: kops:kube-system:psp -rules: -- apiGroups: - - policy - resources: - - podsecuritypolicies - resourceNames: - - kube-system - verbs: - - use ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: kops:kube-system:psp -roleRef: - kind: ClusterRole - name: kops:kube-system:psp - apiGroup: rbac.authorization.k8s.io -subjects: -- kind: Group - name: system:masters - apiGroup: rbac.authorization.k8s.io -# permit the kubelets to access this policy (used for manifests) -- kind: User - name: kubelet - apiGroup: rbac.authorization.k8s.io -{{- if UseBootstrapTokens }} -- kind: Group - name: system:nodes - apiGroup: rbac.authorization.k8s.io -{{- end }} ---- -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - annotations: - k8s-addon: podsecuritypolicy.addons.k8s.io - name: kops:kube-system:psp - namespace: kube-system -roleRef: - kind: ClusterRole - name: kops:kube-system:psp - apiGroup: rbac.authorization.k8s.io -subjects: -# permit the cluster wise admin to use this policy -- kind: Group - name: system:serviceaccounts:kube-system - apiGroup: rbac.authorization.k8s.io diff --git a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go index 408feccd3befa..809ef9a71c565 100644 --- a/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go +++ b/upup/pkg/fi/cloudup/bootstrapchannelbuilder.go @@ -226,31 +226,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "podsecuritypolicy.addons.k8s.io" version := "0.0.4" - { - location := key + "/k8s-1.10.yaml" - id := "k8s-1.10" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.10.0 <1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = 
append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -260,31 +245,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "node-authorizer.addons.k8s.io" version := "v0.0.4-kops.2" - { - location := key + "/k8s-1.10.yaml" - id := "k8s-1.10.yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.10.0 <1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12.yaml" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -297,56 +267,22 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "kube-dns.addons.k8s.io" version := "1.15.13-kops.3" - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } } if kubeDNS.Provider == "CoreDNS" { - { - key := "coredns.addons.k8s.io" - version := "1.7.0-kops.1" - - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - } - { key := "coredns.addons.k8s.io" version := "1.7.0-kops.2" @@ -356,12 +292,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -441,31 +376,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "dns-controller.addons.k8s.io" version := "1.19.0-alpha.5" - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - 
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -489,31 +409,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "external-dns.addons.k8s.io" version := "0.4.5-kops.1" - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -531,12 +436,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -573,12 +477,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.11" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-app": "metrics-server"}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.11.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-app": "metrics-server"}, + Manifest: fi.String(location), + Id: id, }) } } @@ -596,12 +499,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.11" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.11.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } @@ -751,31 +653,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "networking.kope.io" version := "1.0.20181028-kops.2" - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, 
&channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: networkingSelector, + Manifest: fi.String(location), + Id: id, }) } } @@ -783,35 +670,19 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann if b.Cluster.Spec.Networking.Weave != nil { key := "networking.weave" versions := map[string]string{ - "k8s-1.9": "2.7.0-kops.1", "k8s-1.12": "2.7.0-kops.1", } - { - location := key + "/k8s-1.9.yaml" - id := "k8s-1.9" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(versions[id]), + Selector: networkingSelector, + Manifest: fi.String(location), + Id: id, }) } } @@ -819,35 +690,19 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann if b.Cluster.Spec.Networking.Flannel != nil { key := "networking.flannel" versions := map[string]string{ - "k8s-1.6": "0.11.0-kops.2", "k8s-1.12": "0.13.0-kops.1", } - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(versions[id]), + Selector: networkingSelector, + Manifest: fi.String(location), + Id: id, }) } } @@ -855,10 +710,8 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann if b.Cluster.Spec.Networking.Calico != nil { key := "networking.projectcalico.org" versions := map[string]string{ - "k8s-1.7": "2.6.12-kops.1", - "k8s-1.7-v3": "3.8.0-kops.2", - "k8s-1.12": "3.9.6-kops.1", - "k8s-1.16": "3.16.3-kops.2", + "k8s-1.12": "3.9.6-kops.1", + "k8s-1.16": "3.16.3-kops.2", } { @@ -870,7 +723,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0 <1.16.0", + KubernetesVersion: "<1.16.0", Id: id, }) } @@ -888,59 +741,15 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann Id: id, }) } - - if b.Cluster.Spec.Networking.Calico.MajorVersion == "v3" { - { - id := "k8s-1.7-v3" - location := 
key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - } else { - { - id := "k8s-1.7" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - } } if b.Cluster.Spec.Networking.Canal != nil { key := "networking.projectcalico.org.canal" versions := map[string]string{ - "k8s-1.9": "3.2.3-kops.1", "k8s-1.12": "3.7.5-kops.2", "k8s-1.15": "3.12.2-kops.1", "k8s-1.16": "3.13.4-kops.2", } - { - id := "k8s-1.9" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } { id := "k8s-1.12" location := key + "/" + id + ".yaml" @@ -950,7 +759,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0 <1.15.0", + KubernetesVersion: "<1.15.0", Id: id, }) } @@ -985,35 +794,19 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann if b.Cluster.Spec.Networking.Kuberouter != nil { key := "networking.kuberouter" versions := map[string]string{ - "k8s-1.6": "0.3.1-kops.4", "k8s-1.12": "1.0.1-kops.1", } - { - location := key + "/k8s-1.6.yaml" - id := "k8s-1.6" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(versions[id]), + Selector: networkingSelector, + Manifest: fi.String(location), + Id: id, }) } } @@ -1022,25 +815,10 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "networking.amazon-vpc-routed-eni" versions := map[string]string{ - "k8s-1.10": "1.5.0-kops.2", "k8s-1.12": "1.5.5-kops.1", "k8s-1.16": "1.7.5-kops.1", } - { - id := "k8s-1.10" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.10.0 <1.12.0", - Id: id, - }) - } - { id := "k8s-1.12" location := key + "/" + id + ".yaml" @@ -1050,7 +828,7 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann Version: fi.String(versions[id]), Selector: networkingSelector, Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0 <1.16.0", + KubernetesVersion: "<1.16.0", Id: id, }) } @@ -1080,31 +858,16 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann if 
ver.LT(v8) { version := "1.7.3-kops.1" - { - id := "k8s-1.7" - location := key + "/" + id + ".yaml" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { id := "k8s-1.12" location := key + "/" + id + ".yaml" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: networkingSelector, + Manifest: fi.String(location), + Id: id, }) } } else { @@ -1114,12 +877,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann location := key + "/" + id + "-v1.8.yaml" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: networkingSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: networkingSelector, + Manifest: fi.String(location), + Id: id, }) } } @@ -1132,66 +894,35 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann key := "authentication.kope.io" version := "1.0.20181028-kops.1" - { - location := key + "/k8s-1.8.yaml" - id := "k8s-1.8" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: authenticationSelector, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: authenticationSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: authenticationSelector, + Manifest: fi.String(location), + Id: id, }) } } if b.Cluster.Spec.Authentication.Aws != nil { key := "authentication.aws" versions := map[string]string{ - "k8s-1.10": "0.4.0-kops.2", "k8s-1.12": "0.5.1-kops.1", } - { - location := key + "/k8s-1.10.yaml" - id := "k8s-1.10" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: authenticationSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.10.0 <1.12.0", - Id: id, - }) - } - { location := key + "/k8s-1.12.yaml" id := "k8s-1.12" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(versions[id]), - Selector: authenticationSelector, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(versions[id]), + Selector: authenticationSelector, + Manifest: fi.String(location), + Id: id, }) } } @@ -1225,32 +956,14 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.13-ccm" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Manifest: fi.String(location), - Selector: map[string]string{"k8s-addon": key}, - KubernetesVersion: ">=1.13.0", - Id: id, + Name: fi.String(key), + 
Version: fi.String(version), + Manifest: fi.String(location), + Selector: map[string]string{"k8s-addon": key}, + Id: id, }) } } else { - { - key := "core.addons.k8s.io" - version := "1.7.0" - - location := key + "/k8s-1.7.yaml" - id := "k8s-1.7-ccm" - - addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: "<1.12.0", - Id: id, - }) - } - { key := "core.addons.k8s.io" version := "1.12.1-kops.1" @@ -1259,12 +972,11 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*chann id := "k8s-1.12-ccm" addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ - Name: fi.String(key), - Version: fi.String(version), - Selector: map[string]string{"k8s-addon": key}, - Manifest: fi.String(location), - KubernetesVersion: ">=1.12.0", - Id: id, + Name: fi.String(key), + Version: fi.String(version), + Selector: map[string]string{"k8s-addon": key}, + Manifest: fi.String(location), + Id: id, }) } } diff --git a/upup/pkg/fi/cloudup/populatecluster_test.go b/upup/pkg/fi/cloudup/populatecluster_test.go index e169c17f4cee3..771d3fd2979b2 100644 --- a/upup/pkg/fi/cloudup/populatecluster_test.go +++ b/upup/pkg/fi/cloudup/populatecluster_test.go @@ -377,11 +377,7 @@ func TestPopulateCluster_DockerVersion(t *testing.T) { DockerVersion string }{ { - KubernetesVersion: "1.11.0", - DockerVersion: "17.03.2", - }, - { - KubernetesVersion: "1.12.0", + KubernetesVersion: "1.13.0", DockerVersion: "18.06.3", }, { From 8254be29762692adeb391216a0d264b924bf123a Mon Sep 17 00:00:00 2001 From: John Gardiner Myers Date: Sat, 24 Oct 2020 23:10:59 -0700 Subject: [PATCH 2/2] update-expected.sh --- .../amazonvpc/manifest.yaml | 28 +------------------ .../awsiamauthenticator/manifest.yaml | 27 ------------------ .../cilium/manifest.yaml | 19 ------------- .../public-jwks/manifest.yaml | 18 ------------ .../simple/manifest.yaml | 18 ------------ .../weave/manifest.yaml | 27 ------------------ 6 files changed, 1 insertion(+), 136 deletions(-) diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml index e3747de7fe87f..73593f44c530f 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/amazonvpc/manifest.yaml @@ -18,16 +18,7 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml - manifestHash: a50e6a4c2f800b4af4ac0d80edf7762cfc1de9e3 - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.15.13-kops.3 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml manifestHash: db49c98447b9d59dec4fa413461a6614bc6e43e9 name: kube-dns.addons.k8s.io @@ -54,16 +45,7 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml - manifestHash: 4ca9a7bc88b72368ad5c4de5b39dfc5366118a81 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.19.0-alpha.5 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifestHash: e8240400cebfa9a338f8ff1f647e203fe43cbbcf name: 
dns-controller.addons.k8s.io @@ -86,16 +68,8 @@ spec: selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: k8s-1.10 - kubernetesVersion: '>=1.10.0 <1.12.0' - manifest: networking.amazon-vpc-routed-eni/k8s-1.10.yaml - manifestHash: 7db692fb2edbb06e0f5c29c271ca819ce477280c - name: networking.amazon-vpc-routed-eni - selector: - role.kubernetes.io/networking: "1" - version: 1.5.0-kops.2 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0 <1.16.0' + kubernetesVersion: <1.16.0 manifest: networking.amazon-vpc-routed-eni/k8s-1.12.yaml manifestHash: 1b204a83ef58e8b268970861bb18ff2df597c86a name: networking.amazon-vpc-routed-eni diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/manifest.yaml index 369dc207f206b..97fa009257c38 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/awsiamauthenticator/manifest.yaml @@ -18,16 +18,7 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml - manifestHash: a50e6a4c2f800b4af4ac0d80edf7762cfc1de9e3 - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.15.13-kops.3 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml manifestHash: db49c98447b9d59dec4fa413461a6614bc6e43e9 name: kube-dns.addons.k8s.io @@ -47,16 +38,7 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml - manifestHash: 4ca9a7bc88b72368ad5c4de5b39dfc5366118a81 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.19.0-alpha.5 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifestHash: e8240400cebfa9a338f8ff1f647e203fe43cbbcf name: dns-controller.addons.k8s.io @@ -79,16 +61,7 @@ spec: selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: k8s-1.10 - kubernetesVersion: '>=1.10.0 <1.12.0' - manifest: authentication.aws/k8s-1.10.yaml - manifestHash: 84cbe39ff9e48669837d5074b5c0048ae9fc1a8b - name: authentication.aws - selector: - role.kubernetes.io/authentication: "1" - version: 0.4.0-kops.2 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: authentication.aws/k8s-1.12.yaml manifestHash: 7a5100a7a4938565f3bd64decc8c7767f57ae2cc name: authentication.aws diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml index b9b06756cd49d..bcac1464cd8b3 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/cilium/manifest.yaml @@ -18,16 +18,7 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml - manifestHash: a50e6a4c2f800b4af4ac0d80edf7762cfc1de9e3 - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.15.13-kops.3 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml manifestHash: db49c98447b9d59dec4fa413461a6614bc6e43e9 name: kube-dns.addons.k8s.io @@ -54,16 +45,7 @@ spec: selector: k8s-addon: 
limit-range.addons.k8s.io version: 1.5.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml - manifestHash: 4ca9a7bc88b72368ad5c4de5b39dfc5366118a81 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.19.0-alpha.5 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifestHash: e8240400cebfa9a338f8ff1f647e203fe43cbbcf name: dns-controller.addons.k8s.io @@ -87,7 +69,6 @@ spec: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: networking.cilium.io/k8s-1.12-v1.8.yaml manifestHash: d3ec6a179bbf7de2fdc6d34190cc31819ece539b name: networking.cilium.io diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/public-jwks/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/public-jwks/manifest.yaml index 88c2d8a45ab26..a4407def80593 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/public-jwks/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/public-jwks/manifest.yaml @@ -26,16 +26,7 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml - manifestHash: a50e6a4c2f800b4af4ac0d80edf7762cfc1de9e3 - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.15.13-kops.3 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml manifestHash: db49c98447b9d59dec4fa413461a6614bc6e43e9 name: kube-dns.addons.k8s.io @@ -62,16 +53,7 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml - manifestHash: 4ca9a7bc88b72368ad5c4de5b39dfc5366118a81 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.19.0-alpha.5 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifestHash: 5bde4e91a731d201df8463bb87750b051a4a0cfb name: dns-controller.addons.k8s.io diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml index 21c8dfe44d43a..78f4a1b6ad1d1 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/simple/manifest.yaml @@ -18,16 +18,7 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml - manifestHash: a50e6a4c2f800b4af4ac0d80edf7762cfc1de9e3 - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.15.13-kops.3 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml manifestHash: db49c98447b9d59dec4fa413461a6614bc6e43e9 name: kube-dns.addons.k8s.io @@ -47,16 +38,7 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml - manifestHash: 4ca9a7bc88b72368ad5c4de5b39dfc5366118a81 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.19.0-alpha.5 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifestHash: 
e8240400cebfa9a338f8ff1f647e203fe43cbbcf name: dns-controller.addons.k8s.io diff --git a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml index f57644b3e7e43..e071de78b5ee6 100644 --- a/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml +++ b/upup/pkg/fi/cloudup/tests/bootstrapchannelbuilder/weave/manifest.yaml @@ -18,16 +18,7 @@ spec: selector: k8s-addon: core.addons.k8s.io version: 1.4.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: kube-dns.addons.k8s.io/k8s-1.6.yaml - manifestHash: a50e6a4c2f800b4af4ac0d80edf7762cfc1de9e3 - name: kube-dns.addons.k8s.io - selector: - k8s-addon: kube-dns.addons.k8s.io - version: 1.15.13-kops.3 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: kube-dns.addons.k8s.io/k8s-1.12.yaml manifestHash: db49c98447b9d59dec4fa413461a6614bc6e43e9 name: kube-dns.addons.k8s.io @@ -54,16 +45,7 @@ spec: selector: k8s-addon: limit-range.addons.k8s.io version: 1.5.0 - - id: k8s-1.6 - kubernetesVersion: <1.12.0 - manifest: dns-controller.addons.k8s.io/k8s-1.6.yaml - manifestHash: 4ca9a7bc88b72368ad5c4de5b39dfc5366118a81 - name: dns-controller.addons.k8s.io - selector: - k8s-addon: dns-controller.addons.k8s.io - version: 1.19.0-alpha.5 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml manifestHash: e8240400cebfa9a338f8ff1f647e203fe43cbbcf name: dns-controller.addons.k8s.io @@ -86,16 +68,7 @@ spec: selector: k8s-addon: storage-aws.addons.k8s.io version: 1.15.0 - - id: k8s-1.9 - kubernetesVersion: <1.12.0 - manifest: networking.weave/k8s-1.9.yaml - manifestHash: 2081d7cb4852b8856060b92016fee42b4a58ea99 - name: networking.weave - selector: - role.kubernetes.io/networking: "1" - version: 2.7.0-kops.1 - id: k8s-1.12 - kubernetesVersion: '>=1.12.0' manifest: networking.weave/k8s-1.12.yaml manifestHash: 3b33c6cfe20c96620178864ba26f4afae37db5fd name: networking.weave