From dcb56b38272d8aca4e31375bc429d2be430b3919 Mon Sep 17 00:00:00 2001
From: Christopher Tauchen
Date: Thu, 9 May 2024 15:15:09 +0100
Subject: [PATCH] Removes OSS 3.25 for archiving

---
 .../components/AutoHostendpointsMigrate.js | 65 -
 .../components/CalicoWindowsInstall.js | 222 -
 .../_includes/components/EnvironmentFile.js | 201 -
 .../components/HostEndpointsUpgrade.js | 84 -
 .../components/InstallOpenShiftManifests.js | 18 -
 ...aintenanceImageOptionsAlternateRegistry.js | 72 -
 .../_includes/components/ReleaseNotes.js | 78 -
 .../_includes/components/ReqsKernel.js | 70 -
 .../_includes/components/ReqsSys.js | 330 -
 .../_includes/content/_calicoctl-version.mdx | 7 -
 .../_includes/content/_determine-ipam.mdx | 37 -
 .../content/_docker-container-service.mdx | 80 -
 .../_includes/content/_ebpf-value.mdx | 12 -
 .../_includes/content/_endpointport.mdx | 15 -
 .../_includes/content/_entityrule.mdx | 34 -
 .../content/_felix-init-datastore.mdx | 19 -
 .../_includes/content/_httpmatch.mdx | 23 -
 .../version-3.25/_includes/content/_icmp.mdx | 4 -
 .../version-3.25/_includes/content/_ipnat.mdx | 6 -
 .../content/_openstack-etcd-auth.mdx | 16 -
 .../version-3.25/_includes/content/_ports.mdx | 33 -
 .../version-3.25/_includes/content/_rule.mdx | 46 -
 .../_includes/content/_selector-scopes.mdx | 20 -
 .../_includes/content/_selectors.mdx | 50 -
 .../content/_serviceaccountmatch.mdx | 6 -
 .../_includes/content/_servicematch.mdx | 6 -
 .../_includes/release-notes/.gitkeep | 0
 .../release-notes/_v3.25.0-release-notes.mdx | 140 -
 .../release-notes/_v3.25.1-release-notes.mdx | 16 -
 .../release-notes/_v3.25.2-release-notes.mdx | 24 -
 .../version-3.25/about/about-ebpf.mdx | 163 -
 .../about/about-k8s-networking.mdx | 132 -
 .../about/about-kubernetes-egress.mdx | 113 -
 .../about/about-kubernetes-ingress.mdx | 136 -
 .../about/about-kubernetes-services.mdx | 142 -
 .../about/about-network-policy.mdx | 244 -
 .../version-3.25/about/about-networking.mdx | 166 -
 .../version-3.25/about/index.mdx | 331 -
 .../getting-started/bare-metal/about.mdx | 58 -
 .../getting-started/bare-metal/index.mdx | 11 -
 .../bare-metal/installation/binary-mgr.mdx | 74 -
 .../bare-metal/installation/binary.mdx | 117 -
 .../bare-metal/installation/container.mdx | 27 -
 .../bare-metal/installation/index.mdx | 11 -
 .../bare-metal/requirements.mdx | 9 -
 .../version-3.25/getting-started/index.mdx | 11 -
 .../kubernetes/flannel/index.mdx | 11 -
 .../flannel/install-for-flannel.mdx | 105 -
 .../flannel/migration-from-flannel.mdx | 193 -
 .../hardway/configure-bgp-peering.mdx | 160 -
 .../kubernetes/hardway/configure-ip-pools.mdx | 87 -
 .../kubernetes/hardway/end-user-rbac.mdx | 316 -
 .../kubernetes/hardway/index.mdx | 11 -
 .../kubernetes/hardway/install-cni-plugin.mdx | 197 -
 .../kubernetes/hardway/install-node.mdx | 399 -
 .../kubernetes/hardway/install-typha.mdx | 317 -
 .../kubernetes/hardway/istio-integration.mdx | 60 -
 .../kubernetes/hardway/overview.mdx | 47 -
 .../hardway/standing-up-kubernetes.mdx | 42 -
 .../hardway/test-network-policy.mdx | 13 -
 .../kubernetes/hardway/test-networking.mdx | 172 -
 .../hardway/the-calico-datastore.mdx | 109 -
 .../getting-started/kubernetes/helm.mdx | 126 -
 .../getting-started/kubernetes/index.mdx | 11 -
 .../getting-started/kubernetes/k3s/index.mdx | 11 -
 .../kubernetes/k3s/multi-node-install.mdx | 222 -
 .../kubernetes/k3s/quickstart.mdx | 201 -
 .../kubernetes/managed-public-cloud/aks.mdx | 109 -
 .../kubernetes/managed-public-cloud/eks.mdx | 228 -
 .../kubernetes/managed-public-cloud/gke.mdx | 38 -
 .../kubernetes/managed-public-cloud/iks.mdx | 36 -
 .../kubernetes/managed-public-cloud/index.mdx | 11 -
 .../getting-started/kubernetes/microk8s.mdx | 80 -
 .../getting-started/kubernetes/minikube.mdx | 158 -
 .../kubernetes/openshift/index.mdx | 11 -
 .../kubernetes/openshift/installation.mdx | 187 -
 .../kubernetes/openshift/requirements.mdx | 13 -
 .../getting-started/kubernetes/quickstart.mdx | 169 -
 .../getting-started/kubernetes/rancher.mdx | 96 -
 .../kubernetes/requirements.mdx | 72 -
 .../self-managed-onprem/config-options.mdx | 355 -
 .../kubernetes/self-managed-onprem/index.mdx | 11 -
 .../self-managed-onprem/onpremises.mdx | 230 -
 .../self-managed-public-cloud/aws.mdx | 115 -
 .../self-managed-public-cloud/azure.mdx | 87 -
 .../self-managed-public-cloud/do.mdx | 128 -
 .../self-managed-public-cloud/gce.mdx | 235 -
 .../self-managed-public-cloud/index.mdx | 11 -
 .../kubernetes/vpp/getting-started.mdx | 423 -
 .../getting-started/kubernetes/vpp/index.mdx | 11 -
 .../getting-started/kubernetes/vpp/ipsec.mdx | 62 -
 .../kubernetes/vpp/specifics.mdx | 26 -
 .../kubernetes/windows-calico/demo.mdx | 630 --
 .../kubernetes/windows-calico/index.mdx | 11 -
 .../kubernetes/windows-calico/kubeconfig.mdx | 115 -
 .../windows-calico/kubernetes/index.mdx | 11 -
 .../windows-calico/kubernetes/rancher.mdx | 128 -
 .../kubernetes/requirements.mdx | 110 -
 .../windows-calico/kubernetes/standard.mdx | 310 -
 .../kubernetes/windows-calico/limitations.mdx | 174 -
 .../kubernetes/windows-calico/maintain.mdx | 65 -
 .../windows-calico/openshift-installation.mdx | 339 -
 .../kubernetes/windows-calico/quickstart.mdx | 525 --
 .../windows-calico/troubleshoot.mdx | 202 -
 .../getting-started/openstack/index.mdx | 11 -
 .../openstack/installation/devstack.mdx | 78 -
 .../openstack/installation/index.mdx | 11 -
 .../openstack/installation/overview.mdx | 17 -
 .../openstack/installation/redhat.mdx | 287 -
 .../openstack/installation/ubuntu.mdx | 270 -
 .../openstack/installation/verification.mdx | 175 -
 .../getting-started/openstack/overview.mdx | 42 -
 .../openstack/requirements.mdx | 20 -
 .../network-policy/adopt-zero-trust.mdx | 296 -
 .../network-policy/comms/crypto-auth.mdx | 144 -
 .../network-policy/comms/index.mdx | 11 -
 .../network-policy/comms/reduce-nodes.mdx | 83 -
 .../network-policy/comms/secure-bgp.mdx | 185 -
 .../network-policy/comms/secure-metrics.mdx | 512 --
 .../encrypt-cluster-pod-traffic.mdx | 263 -
 .../extreme-traffic/defend-dos-attack.mdx | 107 -
 .../high-connection-workloads.mdx | 89 -
 .../network-policy/extreme-traffic/index.mdx | 11 -
 .../calico-policy/calico-labels.mdx | 110 -
 .../calico-policy/calico-network-policy.mdx | 258 -
 .../calico-policy/calico-policy-tutorial.mdx | 219 -
 .../get-started/calico-policy/index.mdx | 11 -
 .../network-policy-openstack.mdx | 99 -
 .../network-policy/get-started/index.mdx | 11 -
 .../get-started/kubernetes-default-deny.mdx | 151 -
 .../get-started/kubernetes-policy/index.mdx | 11 -
 .../kubernetes-policy/kubernetes-demo.mdx | 101 -
 .../kubernetes-network-policy.mdx | 179 -
 .../kubernetes-policy-advanced.mdx | 342 -
 .../kubernetes-policy-basic.mdx | 207 -
 .../hosts/host-forwarded-traffic.mdx | 151 -
 .../network-policy/hosts/index.mdx | 11 -
 .../network-policy/hosts/kubernetes-nodes.mdx | 214 -
 .../hosts/protect-hosts-tutorial.mdx | 192 -
 .../network-policy/hosts/protect-hosts.mdx | 184 -
 .../version-3.25/network-policy/index.mdx | 11 -
 .../network-policy/istio/app-layer-policy.mdx | 200 -
 .../istio/enforce-policy-istio.mdx | 232 -
 .../network-policy/istio/http-methods.mdx | 47 -
 .../network-policy/istio/index.mdx | 11 -
 .../network-policy/non-privileged.mdx | 95 -
 .../policy-rules/external-ips-policy.mdx | 112 -
 .../network-policy/policy-rules/icmp-ping.mdx | 130 -
 .../network-policy/policy-rules/index.mdx | 11 -
 .../policy-rules/namespace-policy.mdx | 89 -
 .../policy-rules/policy-rules-overview.mdx | 22 -
 .../policy-rules/service-accounts.mdx | 118 -
 .../policy-rules/service-policy.mdx | 119 -
 .../network-policy/services/index.mdx | 11 -
 .../services/kubernetes-node-ports.mdx | 135 -
 .../services/services-cluster-ips.mdx | 193 -
 .../configuring/advertise-service-ips.mdx | 246 -
 .../networking/configuring/bgp.mdx | 242 -
 .../networking/configuring/index.mdx | 11 -
 .../networking/configuring/mtu.mdx | 142 -
 .../configuring/pod-mac-address.mdx | 35 -
 .../configuring/sidecar-acceleration.mdx | 72 -
 .../networking/configuring/use-ipvs.mdx | 55 -
 .../networking/configuring/vxlan-ipip.mdx | 156 -
 .../configuring/workloads-outside-cluster.mdx | 64 -
 .../networking/determine-best-networking.mdx | 288 -
 .../version-3.25/networking/index.mdx | 11 -
 .../networking/ipam/add-floating-ip.mdx | 117 -
 .../ipam/assign-ip-addresses-topology.mdx | 181 -
 .../networking/ipam/change-block-size.mdx | 260 -
 .../ipam/get-started-ip-addresses.mdx | 78 -
 .../version-3.25/networking/ipam/index.mdx | 11 -
 .../networking/ipam/ip-autodetection.mdx | 308 -
 .../networking/ipam/ipv6-control-plane.mdx | 27 -
 .../version-3.25/networking/ipam/ipv6.mdx | 233 -
 .../networking/ipam/legacy-firewalls.mdx | 66 -
 .../networking/ipam/migrate-pools.mdx | 227 -
 .../networking/ipam/use-specific-ip.mdx | 65 -
 .../networking/openstack/configuration.mdx | 87 -
 .../networking/openstack/connectivity.mdx | 255 -
 .../openstack/dev-machine-setup.mdx | 133 -
 .../networking/openstack/floating-ips.mdx | 129 -
 .../networking/openstack/host-routes.mdx | 85 -
 .../networking/openstack/index.mdx | 11 -
 .../networking/openstack/ipv6.mdx | 56 -
 .../networking/openstack/kuryr.mdx | 40 -
 .../networking/openstack/labels.mdx | 141 -
 .../networking/openstack/multiple-regions.mdx | 161 -
 .../networking/openstack/neutron-api.mdx | 206 -
 .../networking/openstack/semantics.mdx | 122 -
 .../networking/openstack/service-ips.mdx | 536 --
 .../operations/calicoctl/configure/etcd.mdx | 190 -
 .../operations/calicoctl/configure/index.mdx | 11 -
 .../operations/calicoctl/configure/kdd.mdx | 124 -
 .../calicoctl/configure/overview.mdx | 74 -
 .../operations/calicoctl/index.mdx | 11 -
 .../operations/calicoctl/install.mdx | 410 -
 .../operations/certificate-management.mdx | 136 -
 .../operations/datastore-migration.mdx | 146 -
 .../operations/decommissioning-a-node.mdx | 94 -
 .../operations/ebpf/enabling-ebpf.mdx | 381 -
 .../version-3.25/operations/ebpf/index.mdx | 11 -
 .../version-3.25/operations/ebpf/install.mdx | 426 -
 .../operations/ebpf/troubleshoot-ebpf.mdx | 247 -
 .../operations/ebpf/use-cases-ebpf.mdx | 95 -
 .../version-3.25/operations/fips.mdx | 60 -
 .../image-options/alternate-registry.mdx | 86 -
 .../operations/image-options/imageset.mdx | 244 -
 .../operations/image-options/index.mdx | 11 -
 .../version-3.25/operations/index.mdx | 11 -
 .../operations/install-apiserver.mdx | 194 -
 .../version-3.25/operations/monitor/index.mdx | 11 -
 .../monitor/monitor-component-metrics.mdx | 661 --
 .../monitor/monitor-component-visual.mdx | 199 -
 .../operations/operator-migration.mdx | 108 -
 .../operations/troubleshoot/commands.mdx | 471 --
 .../troubleshoot/component-logs.mdx | 121 -
 .../operations/troubleshoot/index.mdx | 11 -
 .../troubleshoot/troubleshooting.mdx | 101 -
 .../operations/troubleshoot/vpp.mdx | 424 -
 .../operations/upgrading/index.mdx | 11 -
 .../upgrading/kubernetes-upgrade.mdx | 267 -
 .../upgrading/openshift-upgrade.mdx | 38 -
 .../upgrading/openstack-upgrade.mdx | 166 -
 .../version-3.25/reference/api.mdx | 12 -
 .../reference/architecture/data-path.mdx | 63 -
 .../reference/architecture/design/index.mdx | 11 -
 .../design/l2-interconnect-fabric.mdx | 117 -
 .../design/l3-interconnect-fabric.mdx | 282 -
 .../reference/architecture/index.mdx | 11 -
 .../reference/architecture/overview.mdx | 155 -
 .../reference/calicoctl/apply.mdx | 141 -
 .../reference/calicoctl/convert.mdx | 96 -
 .../reference/calicoctl/create.mdx | 140 -
 .../reference/calicoctl/datastore/index.mdx | 11 -
 .../calicoctl/datastore/migrate/export.mdx | 101 -
 .../calicoctl/datastore/migrate/import.mdx | 61 -
 .../calicoctl/datastore/migrate/index.mdx | 11 -
 .../calicoctl/datastore/migrate/lock.mdx | 54 -
 .../calicoctl/datastore/migrate/overview.mdx | 43 -
 .../calicoctl/datastore/migrate/unlock.mdx | 53 -
 .../calicoctl/datastore/overview.mdx | 37 -
 .../reference/calicoctl/delete.mdx | 150 -
 .../version-3.25/reference/calicoctl/get.mdx | 272 -
 .../reference/calicoctl/index.mdx | 17 -
 .../reference/calicoctl/ipam/check.mdx | 68 -
 .../reference/calicoctl/ipam/configure.mdx | 48 -
 .../reference/calicoctl/ipam/index.mdx | 11 -
 .../reference/calicoctl/ipam/overview.mdx | 41 -
 .../reference/calicoctl/ipam/release.mdx | 58 -
 .../reference/calicoctl/ipam/show.mdx | 154 -
 .../reference/calicoctl/ipam/split.mdx | 84 -
 .../reference/calicoctl/label.mdx | 154 -
 .../reference/calicoctl/node/checksystem.mdx | 66 -
 .../reference/calicoctl/node/diags.mdx | 76 -
 .../reference/calicoctl/node/index.mdx | 11 -
 .../reference/calicoctl/node/overview.mdx | 47 -
 .../reference/calicoctl/node/run.mdx | 375 -
 .../reference/calicoctl/node/status.mdx | 51 -
 .../reference/calicoctl/overview.mdx | 124 -
 .../reference/calicoctl/patch.mdx | 111 -
 .../reference/calicoctl/replace.mdx | 133 -
 .../reference/calicoctl/version.mdx | 46 -
 .../reference/configure-calico-node.mdx | 324 -
 .../reference/configure-cni-plugins.mdx | 594 --
 .../etcd-rbac/calico-etcdv3-paths.mdx | 134 -
 .../etcd-rbac/certificate-generation.mdx | 127 -
 .../reference/etcd-rbac/index.mdx | 11 -
 .../etcd-rbac/kubernetes-advanced.mdx | 92 -
 .../reference/etcd-rbac/kubernetes.mdx | 104 -
 .../reference/etcd-rbac/overview.mdx | 70 -
 .../reference/etcd-rbac/users-and-roles.mdx | 59 -
 .../version-3.25/reference/faq.mdx | 501 --
 .../reference/felix/configuration.mdx | 315 -
 .../version-3.25/reference/felix/index.mdx | 11 -
 .../reference/felix/prometheus.mdx | 124 -
 .../reference/host-endpoints/connectivity.mdx | 92 -
 .../reference/host-endpoints/conntrack.mdx | 29 -
 .../reference/host-endpoints/failsafe.mdx | 44 -
 .../reference/host-endpoints/forwarded.mdx | 80 -
 .../reference/host-endpoints/index.mdx | 11 -
 .../reference/host-endpoints/objects.mdx | 124 -
 .../reference/host-endpoints/overview.mdx | 57 -
 .../reference/host-endpoints/pre-dnat.mdx | 46 -
 .../reference/host-endpoints/selector.mdx | 30 -
 .../reference/host-endpoints/summary.mdx | 96 -
 .../version-3.25/reference/index.mdx | 11 -
 .../reference/installation/_README.mdx | 7 -
 .../reference/installation/_api.mdx | 7281 -----------------
 .../reference/installation/api.mdx | 11 -
 .../reference/installation/config.json | 56 -
 .../version-3.25/reference/involved.mdx | 33 -
 .../kube-controllers/configuration.mdx | 177 -
 .../reference/kube-controllers/index.mdx | 11 -
 .../reference/kube-controllers/prometheus.mdx | 84 -
 .../reference/public-cloud/aws.mdx | 85 -
 .../reference/public-cloud/azure.mdx | 49 -
 .../reference/public-cloud/gce.mdx | 26 -
 .../reference/public-cloud/ibm.mdx | 15 -
 .../reference/public-cloud/index.mdx | 11 -
 .../reference/resources/bgpconfig.mdx | 87 -
 .../reference/resources/bgppeer.mdx | 119 -
 .../reference/resources/blockaffinity.mdx | 31 -
 .../reference/resources/caliconodestatus.mdx | 216 -
 .../reference/resources/felixconfig.mdx | 265 -
 .../resources/globalnetworkpolicy.mdx | 167 -
 .../reference/resources/globalnetworkset.mdx | 55 -
 .../reference/resources/hostendpoint.mdx | 116 -
 .../reference/resources/index.mdx | 11 -
 .../reference/resources/ipamconfig.mdx | 43 -
 .../reference/resources/ippool.mdx | 146 -
 .../reference/resources/ipreservation.mdx | 56 -
 .../resources/kubecontrollersconfig.mdx | 127 -
 .../reference/resources/networkpolicy.mdx | 151 -
 .../reference/resources/networkset.mdx | 63 -
 .../version-3.25/reference/resources/node.mdx | 81 -
 .../reference/resources/overview.mdx | 101 -
 .../reference/resources/profile.mdx | 53 -
 .../reference/resources/workloadendpoint.mdx | 130 -
 .../reference/rest-api-reference.mdx | 15 -
 .../reference/typha/configuration.mdx | 99 -
 .../version-3.25/reference/typha/index.mdx | 11 -
 .../version-3.25/reference/typha/overview.mdx | 18 -
 .../reference/typha/prometheus.mdx | 120 -
 .../reference/vpp/host-network.mdx | 103 -
 .../version-3.25/reference/vpp/index.mdx | 11 -
 .../reference/vpp/technical-details.mdx | 80 -
 .../reference/vpp/uplink-configuration.mdx | 174 -
 .../version-3.25/release-notes/index.mdx | 10 -
 .../version-3.25/releases.json | 158 -
 .../version-3.25/training/index.mdx | 16 -
 .../version-3.25/variables.js | 43 -
 .../version-3.25-sidebars.json | 802 --
 docusaurus.config.js | 7 +-
 releases.html | 77 -
 src/pages/archive.md | 2 +-
 336 files changed, 2 insertions(+), 47008 deletions(-)
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/CalicoWindowsInstall.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/InstallOpenShiftManifests.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/MaintenanceImageOptionsAlternateRegistry.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/ReleaseNotes.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/ReqsKernel.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_ports.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_rule.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/release-notes/.gitkeep
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-ebpf.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-network-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/about-networking.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/about/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-ip-pools.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/end-user-rbac.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-node.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-typha.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/istio-integration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/standing-up-kubernetes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/eks.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/requirements.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/comms/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/hosts/host-forwarded-traffic.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/hosts/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/istio/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/services/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/bgp.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/change-block-size.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/labels.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/multiple-regions.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/neutron-api.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/semantics.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/networking/openstack/service-ips.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/certificate-management.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/datastore-migration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/ebpf/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/ebpf/install.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/fips.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/image-options/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/install-apiserver.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/monitor/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/operator-migration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/troubleshoot/commands.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/upgrading/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/api.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/architecture/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/architecture/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/create.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/export.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/import.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/lock.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/unlock.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/datastore/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/delete.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/get.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/check.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/configure.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/release.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/show.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/ipam/split.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/label.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/node/checksystem.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/node/diags.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/node/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/node/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/node/run.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/node/status.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/patch.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/replace.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/calicoctl/version.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/configure-calico-node.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/configure-cni-plugins.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/calico-etcdv3-paths.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/certificate-generation.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes-advanced.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/etcd-rbac/users-and-roles.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/faq.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/felix/configuration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/felix/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/felix/prometheus.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/connectivity.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/conntrack.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/failsafe.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/forwarded.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/objects.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/pre-dnat.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/selector.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/host-endpoints/summary.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/installation/_README.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/installation/_api.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/installation/api.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/installation/config.json
 delete mode 100644 calico_versioned_docs/version-3.25/reference/involved.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/kube-controllers/configuration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/kube-controllers/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/kube-controllers/prometheus.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/public-cloud/aws.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/public-cloud/azure.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/public-cloud/gce.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/public-cloud/ibm.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/public-cloud/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/bgpconfig.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/bgppeer.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/blockaffinity.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/caliconodestatus.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/felixconfig.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/globalnetworkpolicy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/globalnetworkset.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/hostendpoint.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/ipamconfig.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/ippool.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/ipreservation.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/kubecontrollersconfig.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/networkpolicy.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/networkset.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/node.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/profile.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/resources/workloadendpoint.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/rest-api-reference.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/typha/configuration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/typha/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/typha/overview.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/typha/prometheus.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/vpp/host-network.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/vpp/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/vpp/technical-details.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/reference/vpp/uplink-configuration.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/release-notes/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/releases.json
 delete mode 100644 calico_versioned_docs/version-3.25/training/index.mdx
 delete mode 100644 calico_versioned_docs/version-3.25/variables.js
 delete mode 100644 calico_versioned_sidebars/version-3.25-sidebars.json
 delete mode 100644 releases.html

diff --git a/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js b/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js
deleted file mode 100644
index 2fa83d4362..0000000000
--- a/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate.js
+++ /dev/null
@@ -1,65 +0,0 @@
-import React from 'react';
-
-import Admonition from '@theme/Admonition';
-import Link from '@docusaurus/Link';
-import CodeBlock from '@theme/CodeBlock';
-import Heading from '@theme/Heading';
-
-import { prodname, baseUrl } from '../../variables';
-
-export default function AutoHostendpointsMigrate(props) {
-  return (
-    <>
-      <Heading as='h2' id='migrating-to-auto-host-endpoints'>
-        Migrating to auto host endpoints
-      </Heading>
-      <Admonition type='note'>
-        Auto host endpoints have an allow-all profile attached which allows all traffic in the absence of network
-        policy. This may result in unexpected behavior and data.
-      </Admonition>
-      <p>In order to migrate existing all-interfaces host endpoints to {prodname}-managed auto host endpoints:</p>
-      <ol>
-        <li>
-          Add any labels on existing all-interfaces host endpoints to their corresponding {props.orch} nodes.{' '}
-          {prodname} manages labels on automatic host endpoints by syncing labels from their nodes. Any labels on
-          existing all-interfaces host endpoints should be added to their respective nodes. For example, if your
-          existing all-interfaces host endpoint for node <code>node1</code> has the label{' '}
-          <code>environment: dev</code>, then you must add that same label to its node:
-          <CodeBlock language='bash'>
-            {props.orch === 'OpenShift'
-              ? 'oc label node node1 environment=dev'
-              : 'kubectl label node node1 environment=dev'}
-          </CodeBlock>
-        </li>
-        <li>
-          Enable auto host endpoints by following the{' '}
-          <Link href={`${baseUrl}/network-policy/hosts/kubernetes-nodes`}>
-            enable automatic host endpoints how-to guide
-          </Link>
-          . Note that automatic host endpoints are created with a profile attached that allows all traffic in the
-          absence of network policy.
-          <CodeBlock language='bash'>
-            calicoctl patch kubecontrollersconfiguration default --patch=
-            {'{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'}
-          </CodeBlock>
-        </li>
-        <li>
-          Delete old all-interfaces host endpoints. You can distinguish host endpoints managed by {prodname} from
-          others in several ways. First, automatic host endpoints have the label{' '}
-          <code>projectcalico.org/created-by: calico-kube-controllers</code>. Second, automatic host endpoints'
-          names have the suffix <code>-auto-hep</code>.
-          <CodeBlock language='bash'>calicoctl delete hostendpoint {'<old_hostendpoint_name>'}</CodeBlock>
-        </li>
-      </ol>
-    </>
-  );
-}
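
A component like this was imported from versioned MDX pages rather than rendered standalone. A minimal usage sketch may help orient readers; the `@site` import path and the `orch` prop value below are illustrative assumptions, not taken from this patch:

```jsx
// Hypothetical MDX usage sketch — not part of this patch.
// `orch` selects the orchestrator-specific commands the component renders:
// passing 'OpenShift' swaps the `kubectl label` example for `oc label`.
import AutoHostendpointsMigrate from '@site/calico_versioned_docs/version-3.25/_includes/components/AutoHostendpointsMigrate';

<AutoHostendpointsMigrate orch='Kubernetes' />
```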
- - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/CalicoWindowsInstall.js b/calico_versioned_docs/version-3.25/_includes/components/CalicoWindowsInstall.js deleted file mode 100644 index 8e3a81fc49..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/CalicoWindowsInstall.js +++ /dev/null @@ -1,222 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; - -import { prodname, prodnameWindows, manifestsUrl } from '../../variables'; - -function CalicoWindowsInstallFirstStep(props) { - if (props.networkingType === 'vxlan') { - return ( -
  • - Ensure that BGP is disabled. - -
  • - ); - } - - return ( -
  • -

    - Enable BGP service on the Windows nodes. Install the RemoteAccess service using the following Powershell - commands: -

    - - Install-WindowsFeature RemoteAccess{'\n'} - Install-WindowsFeature RSAT-RemoteAccess-PowerShell - {'\n'} - Install-WindowsFeature Routing - -

    Then restart the computer:

    - Restart-Computer -Force -

    before running:

    - Install-RemoteAccess -VpnType RoutingOnly -

    - Sometimes the remote access service fails to start automatically after install. To make sure it is running, run - the following command: -

    - Start-Service RemoteAccess -
  • - ); -} - -export default function CalicoWindowsInstall(props) { - return ( -
      - -
    1. -

      Download the {prodnameWindows} installation manifest.

      - - {props.networkingType === 'vxlan' - ? `curl ${manifestsUrl}/manifests/calico-windows-vxlan.yaml -o calico-windows.yaml` - : `curl ${manifestsUrl}/manifests/calico-windows-bgp.yaml -o calico-windows.yaml`} - -
    2. -
    3. -

      - Get the cluster's Kubernetes API server host and port, which will be used to update the {prodnameWindows}{' '} - config map. The API server host and port is required so that the {prodnameWindows} installation script can - create a kubeconfig file for {prodname} services. If your Windows nodes already have {prodnameWindows}{' '} - installed manually, skip this step. The installation script will use the API server host and port from your - node's existing kubeconfig file if the KUBERNETES_SERVICE_HOST and{' '} - KUBERNETES_SERVICE_PORT variables are not provided in the calico-windows-config{' '} - ConfigMap. -

      -

      First, make a note of the address of the API server:

      -
        -
      • -

        - If you have a single API server with a static IP address, you can use its IP address and port. The IP can - be found by running: -

        - kubectl get endpoints kubernetes -o wide -

        The output should look like the following, with a single IP address and port under "ENDPOINTS":

        - - {`NAME ENDPOINTS AGE -kubernetes 172.16.101.157:6443 40m`} - -

        - If there are multiple entries under "ENDPOINTS", then your cluster must have more than one API server. In - this case, use the appropriate load balancing option below for your cluster. -

        -
      • -
      • -

        - If using DNS load balancing (as used by kops), use the FQDN and port of the API server{' '} - - api.internal.{'<'}clustername{'>'} - - . -

        -
      • -
      • -

        - If you have multiple API servers with a load balancer in front, you should use the IP and port of the load - balancer. -

        -
      • - -

        - If your cluster uses a ConfigMap to configure kube-proxy you can find the "right" way to - reach the API server by examining the config map. For example: -

        - - kubectl get configmap -n kube-system kube-proxy -o yaml | grep server`{'\n'} - server: https://d881b853ae312e00302a84f1e346a77.gr7.us-west-2.eks.amazonaws.com - -

        - In this case, the server is d881b853aea312e00302a84f1e346a77.gr7.us-west-2.eks.amazonaws.com{' '} - and the port is 443 (the standard HTTPS port). -

        -
        -
      -
    4. -
    5. -

      - Edit the calico-windows-config ConfigMap in the downloaded manifest and ensure the required - variables are correct for your cluster. -

      -
        -
      • - {props.networkingType === 'vxlan' ? ( - <> - CALICO_NETWORKING_BACKEND: This should be set to vxlan. - - ) : ( - <> - CALICO_NETWORKING_BACKEND: This should be set to windows-bgp. - - )} -
      • -
      • - KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT: The Kubernetes API server - host and port (discovered in the previous step) used to create a kubeconfig file for {prodname} services. If - your node already has an existing kubeconfig file, leave these variables blank. -
      • -
      • - K8S_SERVICE_CIDR: The Kubernetes service clusterIP range configured in your cluster. This must - match the service-cluster-ip-range used by kube-apiserver. -
      • -
      • - CNI_BIN_DIR: Path where {prodname} CNI binaries will be installed. This must match the CNI bin - value in the ContainerD service configuration. If you used the provided Install-Containerd.ps1 script, you - should use the CNI bin path value you provided to that script. -
      • -
      • - CNI_CONF_DIR: Path where {prodname} CNI configuration will be installed. This must match the - CNI conf value in the ContainerD service configuration. If you used the provided Install-Containerd.ps1 - script, you should use the CNI conf path value you provided to that script. -
      • -
      • - DNS_NAME_SERVERS: The DNS nameservers that will be used in the CNI configuration. -
      • -
      • - FELIX_HEALTHENABLED: The Felix health check server must be enabled. -
      • -
      -
    6. -
    7. -

      Apply the {prodnameWindows} installation manifest.

      - kubectl create -f calico-windows.yaml -
    8. -
    9. -

      Monitor the installation.

      - - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c install - -

      - After the log Calico for Windows installed appears, installation is complete. Next, the{' '} - {prodnameWindows} services are started in separate containers: -

      - - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c node{'\n'} - kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c felix{'\n'} - {props.networkingType === 'windows-bgp' - ? `kubectl logs -f -n calico-system -l k8s-app=calico-node-windows -c confd` - : null} - -
    10. -
    11. - Install kube-proxy. -

      - Depending on your platform, you may already have kube-proxy running on your Windows nodes. If kube-proxy is - already running on your Windows nodes, skip this step. If kube-proxy is not running, you must install and run - kube-proxy on each of the Windows nodes in your cluster. Note: The provided manifest depends on the kubeconfig - provided by the kube-proxy ConfigMap in the kube-system namespace. -

      -
        -
      • -

        Download the kube-proxy manifest:

        - curl {manifestsUrl}/manifests/windows-kube-proxy.yaml -o windows-kube-proxy.yaml -
      • -
      • - Edit the downloaded manifest (a sketch of these edits follows this list) -
          -
        • - Replace VERSION with your Windows nodes' server version. E.g. 1809. -
        • -
        • - Update the K8S_VERSION env variable value with your Kubernetes cluster version. -
        • -
        -
      • -
      • -

        Apply the manifest

        - kubectl apply -f windows-kube-proxy.yaml -
      • -
      • -

        Verify the kube-proxy-windows daemonset is running

        - kubectl describe ds -n kube-system kube-proxy-windows -
      • -
      -
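      A sketch of the edits from the step above, assuming GNU sed; `1809` and `v1.24.3` are placeholder values, not recommendations:

      ```bash
      # Substitute your Windows Server version for 1809 and your cluster's
      # Kubernetes version for v1.24.3; both are illustrative values only.
      sed -i 's/VERSION/1809/g' windows-kube-proxy.yaml
      # Then open the manifest and set the K8S_VERSION env value, e.g.:
      #   - name: K8S_VERSION
      #     value: "v1.24.3"
      ```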
    12. -
    - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js b/calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js deleted file mode 100644 index cdeecc20ee..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/EnvironmentFile.js +++ /dev/null @@ -1,201 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import Link from '@docusaurus/Link'; - -import { prodname, baseUrl } from '../../variables'; - -export default function EnvironmentFile(props) { - if (props.target === 'felix') { - var etcd_endpoints = 'FELIX_ETCDENDPOINTS'; - var etcd_cert_file = 'FELIX_ETCDCERTFILE'; - var etcd_key_file = 'FELIX_ETCDKEYFILE'; - var etcd_ca_file = 'FELIX_ETCDCAFILE'; - var datastore_type = 'FELIX_DATASTORETYPE'; - } else { - var etcd_endpoints = 'ETCD_ENDPOINTS'; - var etcd_cert_file = 'ETCD_CERT_FILE'; - var etcd_key_file = 'ETCD_KEY_FILE'; - var etcd_ca_file = 'ETCD_CA_CERT_FILE'; - var datastore_type = 'DATASTORE_TYPE'; - } - - return ( - <> -

    - - Use the following guidelines and sample file to define the environment variables for starting Calico on the - host. For more help, see the{' '} - - {props.install === 'container' ? ( - {props.nodecontainer} configuration reference - ) : ( - Felix configuration reference - )} -

    - - -

    For a Kubernetes datastore (default) set the following (a minimal sample follows the table):

    - - - - - - - - - - - - - - - - - -
    VariableConfiguration guidance
    FELIX_DATASTORETYPE - Set to kubernetes -
    KUBECONFIGPath to kubeconfig file to access the Kubernetes API Server
    - {props.install === 'container' && ( - - You will need to volume mount the kubeconfig file into the container at the location specified by the - paths mentioned above. - - )} -
    - -
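    A minimal sample for the Kubernetes datastore case, using the variable names from the table above; the kubeconfig path is illustrative:

    ```bash
    # /etc/calico/calico.env - minimal sketch for the Kubernetes datastore.
    # The path below is an example; point it at your actual kubeconfig.
    FELIX_DATASTORETYPE=kubernetes
    KUBECONFIG=/etc/kubernetes/kubeconfig
    ```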

    For an etcdv3 datastore set the following:

    - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableConfiguration guidance
    {datastore_type} - Set to etcdv3 -
    {etcd_endpoints}Comma separated list of etcdv3 cluster URLs, e.g. https://calico-datastore.example.com:2379
    {etcd_ca_file} - Path to CA certificate to validate etcd’s server cert. Required if using TLS and not using a public - CA. -
    - {etcd_cert_file} -
    - {etcd_key_file} -
    Paths to certificate and keys used for client authentication to the etcd cluster, if enabled.
    - {props.install === 'container' && ( - - If using certificates and keys, you will need to volume mount them into the container at the location - specified by the paths mentioned above. - - )} -
    - -

    For either datastore set the following:

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    VariableConfiguration guidance
    CALICO_NODENAME - Identifies the node. If a value is not specified, the compute server hostname is used to identify the - Calico node. -
    CALICO_IP or CALICO_IP6 - If values are not specified for both, {prodname} uses the currently-configured values for the next hop - IP addresses for this node—these can be configured through the Node resource. If no next hop addresses - are configured, {prodname} automatically determines an IPv4 next hop address by querying the host - interfaces (and configures this value in the Node resource). You can set CALICO_IP to{' '} - autodetect to force auto-detection of IP address every time the node starts. If you set - IP addresses through these environment variables, it reconfigures any values currently set through the - Node resource. -
    CALICO_AS - If not specified, {prodname} uses the currently configured value for the AS Number for the node BGP - client—this can be configured through the Node resource. If the Node resource value is not set, Calico - inherits the AS Number from the global default value. If you set a value through this environment - variable, it reconfigures any value currently set through the Node resource. -
    NO_DEFAULT_POOLS - Set to true to prevent {prodname} from creating a default pool if one does not exist. Pools are used - for workload endpoints and not required for non-cluster hosts. -
    CALICO_NETWORKING_BACKEND - The networking backend to use. In bird mode, Calico will provide BGP networking using the - BIRD BGP daemon; VXLAN networking can also be used. In vxlan mode, only VXLAN networking - is provided; BIRD and BGP are disabled. If you want to run Calico for policy only, set to{' '} - none. -
    -

    - Sample EnvironmentFile - save to /etc/calico/calico.env -

    - - {`${datastore_type}=etcdv3 -${etcd_endpoints}=https://calico-datastore.example.com:2379 -${etcd_ca_file}="/pki/ca.pem" -${etcd_cert_file}="/pki/client-cert.pem" -${etcd_key_file}="/pki/client-key.pem"`} - {props.install === 'container' - ? ` -CALICO_NODENAME="" -NO_DEFAULT_POOLS="true" -CALICO_IP="" -CALICO_IP6="" -CALICO_AS="" -CALICO_NETWORKING_BACKEND=bird` - : ''} - -
    -
    - - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js b/calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js deleted file mode 100644 index ebacc53bb8..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/HostEndpointsUpgrade.js +++ /dev/null @@ -1,84 +0,0 @@ -import React from 'react'; -import Admonition from '@theme/Admonition'; -import CodeBlock from '@theme/CodeBlock'; -import Heading from '@theme/Heading'; - -import { prodname, version } from '../../variables'; - -export default function HostEndpointsUpgrade(props) { - return ( - <> - - Host Endpoints - - - If your cluster has host endpoints with interfaceName: * you must prepare your cluster before - upgrading. Failure to do so will result in an outage. - -

    - In versions of {prodname} prior to v3.14, all-interfaces host endpoints (host endpoints with{' '} - interfaceName: *) only supported pre-DNAT policy. The default behavior of all-interfaces host - endpoints, in the absence of any policy, was to allow all traffic. -

    -

    - Beginning from v3.14, all-interfaces host endpoints support normal policy in addition to pre-DNAT policy. The - support for normal policy includes a change in default behavior for all-interfaces host endpoints: in the - absence of policy the default behavior is to drop traffic. This default behavior is consistent - with "named" host endpoints (which specify a named interface such as "eth0"); named host - endpoints drop traffic in the absence of policy. -

    -

    - Before upgrading to {version}, you must ensure that global network policies are in place that select existing - all-interfaces host endpoints and explicitly allow existing traffic flows. As a starting point, you can create - an allow-all policy that selects existing all-interfaces host endpoints. First, we'll add a label to the - existing host endpoints. Get a list of the nodes that have an all-interfaces host endpoint: -

    - calicoctl get hep -owide | grep '*' | awk '"{'print $1'}"' -

    - With the names of the all-interfaces host endpoints, we can label each host endpoint with a new label (for - example, host-endpoint-upgrade: ""): -

    - - calicoctl get hep -owide | grep '*' | awk '"{'print $1'}"' \ -
    - {props.orch === 'OpenShift' - ? '| xargs -I {} oc exec -i -n kube-system calicoctl -- /calicoctl label hostendpoint {} host-endpoint-upgrade=' - : '| xargs -I {} kubectl exec -i -n kube-system calicoctl -- /calicoctl label hostendpoint {} host-endpoint-upgrade= '} -
    -

    - Now that the nodes with an all-interfaces host endpoint are labeled with host-endpoint-upgrade, - we can create a policy to log and allow all traffic going into or out of the host endpoints temporarily: -

    - - {`cat > allow-all-upgrade.yaml < -
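    The heredoc body is truncated in the hunk above. Based on the surrounding description (log, then allow, all traffic for the labeled host endpoints), a plausible reconstruction follows; the policy name and exact field values are assumptions:

    ```bash
    # Hedged reconstruction of the truncated policy - verify before use.
    cat > allow-all-upgrade.yaml <<EOF
    apiVersion: projectcalico.org/v3
    kind: GlobalNetworkPolicy
    metadata:
      name: allow-all-upgrade
    spec:
      selector: has(host-endpoint-upgrade)
      types:
        - Ingress
        - Egress
      ingress:
        - action: Log
        - action: Allow
      egress:
        - action: Log
        - action: Allow
    EOF
    ```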

    Apply the policy:

    - calicoctl apply -f - {'<'} allow-all-upgrade.yaml -

    - After applying this policy, all-interfaces host endpoints will log and allow all traffic through them. This - policy will allow all traffic not accounted for by other policies. After upgrading, please review syslog logs - for traffic going through the host endpoints and update the policy as needed to secure traffic to the host - endpoints. -
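    One way to review those logs might be the following sketch; it assumes Felix's default `LogPrefix` setting of `calico-packet` and a conventional syslog location, both of which can differ on your hosts:

    ```bash
    # Illustrative - the log file location and prefix depend on your syslog
    # setup and on Felix's LogPrefix configuration (default "calico-packet").
    grep 'calico-packet' /var/log/syslog | less
    ```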

    - - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/InstallOpenShiftManifests.js b/calico_versioned_docs/version-3.25/_includes/components/InstallOpenShiftManifests.js deleted file mode 100644 index 5732ed8745..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/InstallOpenShiftManifests.js +++ /dev/null @@ -1,18 +0,0 @@ -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; - -import { prodname, releaseTitle, calicoReleasesURL } from '../../variables'; - -export default function InstallOpenShiftManifests() { - return ( - <> -

    Download the {prodname} manifests for OpenShift and add them to the generated manifests directory:

    - - {`mkdir calico -wget -qO- ${calicoReleasesURL}/${releaseTitle}/ocp.tgz | tar xvz --strip-components=1 -C calico -cp calico/* manifests/`} - - - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/MaintenanceImageOptionsAlternateRegistry.js b/calico_versioned_docs/version-3.25/_includes/components/MaintenanceImageOptionsAlternateRegistry.js deleted file mode 100644 index f937b1cfb5..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/MaintenanceImageOptionsAlternateRegistry.js +++ /dev/null @@ -1,72 +0,0 @@ -// Temporary component for "calico\maintenance\image-options\alternate-registry.mdx" - -import React from 'react'; - -import CodeBlock from '@theme/CodeBlock'; - -import { imageNames, prodname, registry, releases, tigeraOperator } from '../../variables'; - -export default function MaintenanceImageOptionsAlternateRegistry() { - const cmds1 = renderInstallCommands( - (componentData, reg, componentNames, i) => `docker pull ${reg}${componentNames[i]}:${componentData.version}` - ); - - const cmds2 = renderInstallCommands( - (componentData, reg, componentNames, i) => - `docker tag ${reg}${componentNames[i]}:${componentData.version} $REGISTRY/${componentNames[i]}:${componentData.version}` - ); - - const cmds3 = renderInstallCommands( - (componentData, reg, componentNames, i) => `docker push $REGISTRY/${componentNames[i]}:${componentData.version}` - ); - - return ( -
      -
    1. -

      Use the following commands to pull the required {prodname} images (a concrete single-image example follows these steps).

      -
    2. - - docker pull {tigeraOperator.registry}/{tigeraOperator.image}:{tigeraOperator.version} - {'\n'} - {cmds1} - -
    3. -

      - Retag the images with the name of your registry $REGISTRY. -

      -
    4. - - docker tag {tigeraOperator.registry}/{tigeraOperator.image}:{tigeraOperator.version} $REGISTRY/ - {tigeraOperator.image}:{tigeraOperator.version} - {'\n'} - {cmds2} - -
    5. -

      Push the images to your registry.

      -
    6. - - docker push $REGISTRY/{tigeraOperator.image}:{tigeraOperator.version} - {'\n'} - {cmds3} - -
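      As a concrete, illustrative expansion of the generated commands for a single image; the registry name, image, and tag below are placeholders, and the real list comes from the commands above:

      ```bash
      # Example only - substitute your own target registry and the actual
      # image list/tags produced by the generated commands.
      REGISTRY=registry.example.com/calico
      docker pull quay.io/tigera/operator:v1.29.0
      docker tag quay.io/tigera/operator:v1.29.0 $REGISTRY/tigera/operator:v1.29.0
      docker push $REGISTRY/tigera/operator:v1.29.0
      ```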
    - ); -} - -function renderInstallCommands(renderCommand) { - const releaseComponents = releases[0].components; - const components = Object.keys(releaseComponents); - const filteredComponents = components.filter((c) => imageNames[c] && !c.includes('flannel')); - const componentNames = filteredComponents.map((c) => imageNames[c]); - - const result = filteredComponents - .map((c, i) => { - const componentData = releaseComponents[c]; - const reg = componentData.registry ? `${componentData.registry}/` : registry; - - return renderCommand(componentData, reg, componentNames, i); - }) - .join('\n'); - - return result; -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/ReleaseNotes.js b/calico_versioned_docs/version-3.25/_includes/components/ReleaseNotes.js deleted file mode 100644 index 2422b554f9..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/ReleaseNotes.js +++ /dev/null @@ -1,78 +0,0 @@ -import React from 'react'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; - -import { toKebab } from '@site/src/components/utils/formatters'; -import { componentUrl } from '@site/src/components/utils/componentUrl'; - -import variables from '../../variables'; - -export default function ReleaseNotes() { - const { prodname, version, imageNames } = variables; - - const releases = variables.releases.map((release) => { - let note = release.note; - try { - if (!note) { - note = require(`../release-notes/_${release.title}-release-notes.mdx`).default({}); - } - } catch { - console.error(`Cannot find "/_includes/release-notes/_${release.title}-release-notes.mdx" file`); - } - - return { - ...release, - note, - }; - }); - - return ( - <> - {releases.map((release) => ( -
    - - Calico Open Source {release.title} - - {release.title !== 'master' && ( -

    - - Release archive - {' '} - with Kubernetes manifests, Docker images and binaries. -

    - )} - {release.note} - - - - - - - - - {Object.keys(release.components).map((comp) => { - // Use the imageName for the component, if it has one, for better readability - const componentName = imageNames[comp] || comp; - - return ( - - - - - ); - })} - -
    ComponentVersion
    {componentName} - {release.components[comp].version} -
    -
    - ))} - - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/ReqsKernel.js b/calico_versioned_docs/version-3.25/_includes/components/ReqsKernel.js deleted file mode 100644 index ed796bdf49..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/ReqsKernel.js +++ /dev/null @@ -1,70 +0,0 @@ -import React from 'react'; -import Admonition from '@theme/Admonition'; -import Heading from '@theme/Heading'; - -import { prodname } from '../../variables'; - -export default function ReqsKernel() { - return ( - <> - - Kernel Dependencies - - -

    If you are using one of the recommended distributions, you will already satisfy these.

    -
    -

    - Due to the large number of distributions and kernel versions out there, it’s hard to be precise about the names - of the particular kernel modules that are required to run {prodname}. However, in general, you’ll need the following (an illustrative spot-check appears after this list): -

    -
      -
    • -

      - The iptables modules (both the “legacy” and “nft” variants are supported). These are typically - broken up into many small modules, one for each type of match criteria and one for each type of action.{' '} - {prodname} requires: -

      -
        -
      • The “base” modules (including the IPv6 versions if IPv6 is enabled in your cluster).
      • -
      • - At least the following match criteria: set, rpfilter, addrtype,{' '} - comment, conntrack, icmp, tcp, udp,{' '} - icmpv6 (if IPv6 is enabled in your kernel), mark,{' '} - multiport, sctp, and ipvs (if using - kube-proxy in IPVS mode). -
      • -
      • - At least the following actions: REJECT, ACCEPT, DROP,{' '} - LOG. -
      • -
      -
    • -
    • -

      IP sets support.

      -
    • -
    • -

      Netfilter Conntrack support compiled in (with SCTP support if using SCTP).

      -
    • -
    • -

      - IPVS support if using kube-proxy in IPVS mode. -

      -
    • -
    • -

      - IPIP, VXLAN, Wireguard support, if using {prodname} - networking in one of those modes. -

      -
    • -
    • -

      - eBPF (including the tc hook support) and XDP (if you want to use the eBPF dataplane). -

      -
    • -
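    An illustrative spot-check for some of the modules above; module names vary by distribution, and some features may be compiled into the kernel rather than built as loadable modules:

    ```bash
    # Sketch: look for a few of the required modules.
    lsmod | grep -E 'ip_set|nf_conntrack|xt_(addrtype|comment|multiport|sctp)'
    # Features compiled into the kernel won't show in lsmod; check the config:
    grep -E 'CONFIG_IP_SET|CONFIG_NF_CONNTRACK' /boot/config-$(uname -r)
    ```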
    - - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js b/calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js deleted file mode 100644 index 521f31bdbc..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/components/ReqsSys.js +++ /dev/null @@ -1,330 +0,0 @@ -import React from 'react'; - -import Admonition from '@theme/Admonition'; -import Link from '@docusaurus/Link'; -import Heading from '@theme/Heading'; - -import { orchestrators } from '@site/variables'; -import { prodname, baseUrl } from '../../variables'; - -function NodeRequirementsOSS(props) { - return ( - <> - - Node requirements - -
      -
    • -

      x86-64, arm64, ppc64le, or s390x processor

      -
    • -
    • -

      - {prodname} must be able to manage cali* interfaces on the host. When IPIP is enabled (the - default), {prodname} also needs to be able to manage tunl* interfaces. When VXLAN is enabled,{' '} - {prodname} also needs to be able to manage the - vxlan.calico interface (an illustrative check follows this list). -

      -
    • -
    • -

      - Linux kernel 3.10 or later with required dependencies. The - following distributions have the required kernel, its dependencies, and are known to work well with{' '} - {prodname} and {props.orch}. -

      -
        -
      • RedHat Linux 7
      • - {(props.orch === orchestrators.Kubernetes || props.orch === orchestrators.HostProtection) && ( - <> -
      • CentOS 7
      • -
      • CoreOS Container Linux stable
      • -
      • Ubuntu 18.04
      • -
      • Debian 8
      • - - )} - {props.orch === orchestrators.OpenShift && ( - <> -
      • RedHat Container OS
      • - - )} - {props.orch === orchestrators.OpenStack && ( - <> -
      • Ubuntu 18.04
      • -
      • CentOS 8
      • - - )} -
      -
    • -
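    An illustrative check of the interface requirement above; these interfaces only appear once {prodname} is running and the relevant mode (IPIP or VXLAN) is enabled:

    ```bash
    # Sketch: list any Calico-managed interfaces present on the host.
    ip -brief link | grep -E '^(cali|tunl|vxlan\.calico)' || echo "no Calico interfaces found"
    ```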
    - - ); -} - -function NotesOSS() { - return ( - <> -
    - -

    - Many Linux distributions, such as most of the above, include NetworkManager. By default, NetworkManager does - not allow {prodname} to manage interfaces. If your nodes have NetworkManager, complete the steps in{' '} - - Preventing NetworkManager from controlling {prodname} interfaces - {' '} - before installing {prodname}. -

    -
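    The linked page describes marking {prodname}'s interfaces as unmanaged; a sketch of such a NetworkManager drop-in follows (the file path and interface patterns reflect common guidance and should be verified against that page):

    ```bash
    # Sketch: tell NetworkManager to leave Calico-managed interfaces alone.
    cat > /etc/NetworkManager/conf.d/calico.conf <<EOF
    [keyfile]
    unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico
    EOF
    ```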
    -
    -
      -
    • - If your Linux distribution comes with Firewalld or another iptables manager installed, it should be disabled. - These may interfere with rules added by {prodname} and result in unexpected behavior. -
    • -
    -
    - -

    - If a host firewall is needed, it can be configured using {prodname} HostEndpoint and GlobalNetworkPolicy. For - more information, see Security for host. -

    -
    -
    - - ); -} - -function KeyValueStore(props) { - return ( - <> - - Key/value store - -

    - {prodname} requires a key/value store accessible by all {prodname} components.  - { - { - OpenShift: With OpenShift, the Kubernetes API datastore is used for the key/value store., - Kubernetes: ( - - On Kubernetes, you can configure {prodname} to access an etcdv3 cluster directly or to use the - Kubernetes API datastore. - - ), - OpenStack: ( - - For production you will likely want multiple nodes for greater performance and reliability. If you don’t - already have an etcdv3 cluster to connect to, please refer to{' '} - the upstream etcd docs for detailed advice and setup. - - ), - 'host protection': The key/value store must be etcdv3., - }[props.orch] - } -

    - - ); -} - -function NetworkRequirementsOSS(props) { - return ( - <> - - Network requirements - -

    Ensure that your hosts and firewalls allow the necessary traffic based on your configuration.

    - - - - - - - - - - - - - - - - - - - - - - - - - - - - - {props.orch === orchestrators.OpenShift && ( - <> - - - - - - - - - - - - - - - - - - - - )} - {props.orch === orchestrators.Kubernetes && ( - <> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - )} - {props.orch !== orchestrators.Kubernetes && props.orch !== orchestrators.OpenShift && ( - - - - - - - )} - -
    ConfigurationHost(s)Connection typePort/protocol
    {prodname} networking (BGP)AllBidirectionalTCP 179
    {prodname} networking with IP-in-IP enabled (default)AllBidirectional - IP-in-IP, often represented by its protocol number 4 -
    {prodname} networking with VXLAN enabledAllBidirectionalUDP 4789
    Typha accessTypha agent hostsIncomingTCP 5473 (default)
    Allkube-apiserver hostIncomingOften TCP 443 or 8443*
    {prodname} networking with VXLAN enabledAllBidirectionalUDP 4789
    {prodname} networking with Typha enabledTypha agent hostsIncomingTCP 5473 (default)
    {prodname} networking with IPv4 Wireguard enabledAllBidirectionalUDP 51820 (default)
    {prodname} networking with IPv6 Wireguard enabledAllBidirectionalUDP 51821 (default)
    flannel networking (VXLAN)AllBidirectionalUDP 4789
    Allkube-apiserver hostIncomingOften TCP 443 or 6443*
    etcd datastoreetcd hostsIncoming - - Officially - {' '} - TCP 2379 but can vary -
    Alletcd hostsIncoming - - Officially - {' '} - TCP 2379 but can vary -
    - {(props.orch === orchestrators.Kubernetes || props.orch === orchestrators.OpenShift) && ( -

    - *{' '} - - The value passed to kube-apiserver using the --secure-port flag. If you cannot locate this, - check the targetPort value returned by - kubectl get svc kubernetes -o yaml. - -

    - )} - {props.orch === orchestrators.OpenStack && ( -

    - *{' '} - - If your compute hosts connect directly and don’t use IP-in-IP, you don’t need to allow IP-in-IP traffic. - -

    - )} - - ); -} - -function Privileges(props) { - return ( - <> - - Privileges - -

    - Ensure that {prodname} has the CAP_SYS_ADMIN privilege. -

    -

    - The simplest way to provide the necessary privilege is to run {prodname} as root or in a privileged container. -
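    For illustration, the privileged-container approach looks like this in a pod spec fragment; the image tag is an example, and this is not the full calico-node daemon set spec:

    ```yaml
    # Illustrative fragment only.
    containers:
      - name: calico-node
        image: calico/node:v3.25.0
        securityContext:
          privileged: true
    ```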

    - {props.orch === orchestrators.Kubernetes && ( - <> -

    - When installed as a Kubernetes daemon set, {prodname} meets this requirement by running as a privileged - container. This requires that the kubelet be allowed to run privileged containers. There are two ways this - can be achieved. -

    -
      -
    • - Specify --allow-privileged on the kubelet (deprecated). -
    • -
    • - Use a{' '} - pod security policy. -
    • -
    - - )} - - ); -} - -export default function ReqsSys(props) { - return ( - <> - - - - - - - ); -} diff --git a/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx b/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx deleted file mode 100644 index fe02003d55..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx +++ /dev/null @@ -1,7 +0,0 @@ -| Field | Value | -| ------------------- | ------------------------------------------------------ | -| `Client Version` | Version of `calicoctl` | -| `Build date` | Time and date of `calicoctl` build | -| `Git commit` | Git commit number of `calicoctl` | -| `Cluster Version`\* | Version number of `{{nodecontainer}}` and {{prodname}} | -| `Cluster Type`\* | Other details about the cluster | diff --git a/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx b/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx deleted file mode 100644 index a81831bd4f..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx +++ /dev/null @@ -1,37 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -If you are not sure which IPAM your cluster is using, the way to tell depends on install method. - - - - -The IPAM plugin can be queried on the default Installation resource. - -``` -kubectl get installation default -o go-template --template {{.spec.cni.ipam.type}} -``` - -If your cluster is using Calico IPAM, the above command should return a result of `Calico`. - - - - -SSH to one of your Kubernetes nodes and examine the CNI configuration. - -``` -cat /etc/cni/net.d/10-calico.conflist -``` - -Look for the entry: - -``` - "ipam": { - "type": "calico-ipam" - }, -``` - -If it is present, you are using the {{prodname}} IPAM. If the IPAM is not {{prodname}}, or the 10-calico.conflist file does not exist, you cannot use these features in your cluster. - - - diff --git a/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx b/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx deleted file mode 100644 index f072233f84..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx +++ /dev/null @@ -1,80 +0,0 @@ -This section describes how to run `{{nodecontainer}}` as a Docker container. - -:::note - -We include examples for systemd, but the commands can be -applied to other init daemons such as upstart. - -::: - -### Step 1: Create environment file - - - -### Step 2: Configure the init system - -Use an init daemon (like systemd or upstart) to start the {{nodecontainer}} image as a service using the EnvironmentFile values. 
- -Sample systemd service file: `{{noderunning}}.service` - -```shell -[Unit] -Description={{noderunning}} -After=docker.service -Requires=docker.service - -[Service] -EnvironmentFile=/etc/calico/calico.env -ExecStartPre=-/usr/bin/docker rm -f {{noderunning}} -ExecStart=/usr/bin/docker run --net=host --privileged \ - --name={{noderunning}} \ - -e NODENAME=${CALICO_NODENAME} \ - -e IP=${CALICO_IP} \ - -e IP6=${CALICO_IP6} \ - -e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \ - -e AS=${CALICO_AS} \ - -e NO_DEFAULT_POOLS=${NO_DEFAULT_POOLS} \ - -e DATASTORE_TYPE=${DATASTORE_TYPE} \ - -e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \ - -e ETCD_CA_CERT_FILE=${ETCD_CA_CERT_FILE} \ - -e ETCD_CERT_FILE=${ETCD_CERT_FILE} \ - -e ETCD_KEY_FILE=${ETCD_KEY_FILE} \ - -e KUBECONFIG=${KUBECONFIG} \ - -v /var/log/calico:/var/log/calico \ - -v /var/lib/calico:/var/lib/calico \ - -v /var/run/calico:/var/run/calico \ - -v /run/docker/plugins:/run/docker/plugins \ - -v /lib/modules:/lib/modules \ - -v /etc/pki:/pki \ - {{registry}}{{imageNames.calico/node}}:{{releaseTitle}} /bin/calico-node -felix - -ExecStop=-/usr/bin/docker stop {{noderunning}} - -Restart=on-failure -StartLimitBurst=3 -StartLimitInterval=60s - -[Install] -WantedBy=multi-user.target -``` - -Upon start, the systemd service: - -- Confirms Docker is installed under the `[Unit]` section -- Gets environment variables from the environment file above -- Removes existing `{{nodecontainer}}` container (if it exists) -- Starts `{{nodecontainer}}` - -The script also stops the `{{nodecontainer}}` container when the service is stopped. - -:::note - -Depending on how you've installed Docker, the name of the Docker service -under the `[Unit]` section may be different (such as `docker-engine.service`). -Be sure to check this before starting the service. - -::: diff --git a/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx b/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx deleted file mode 100644 index ca2baf3587..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx +++ /dev/null @@ -1,12 +0,0 @@ -The eBPF dataplane mode has several advantages over standard Linux networking pipeline mode: - -- It scales to higher throughput. -- It uses less CPU per GBit. -- It has native support for Kubernetes services (without needing kube-proxy) that: - - - Reduces first packet latency for packets to services. - - Preserves external client source IP addresses all the way to the pod. - - Supports DSR (Direct Server Return) for more efficient service routing. - - Uses less CPU than kube-proxy to keep the dataplane in sync. - -To learn more and see performance metrics from our test environment, see the blog, [Introducing the Calico eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/). diff --git a/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx b/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx deleted file mode 100644 index 05dde33612..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx +++ /dev/null @@ -1,15 +0,0 @@ -An EndpointPort associates a name with a particular TCP/UDP/SCTP port of the endpoint, allowing it to -be referenced as a named port in [policy rules](../../reference/resources/networkpolicy.mdx#entityrule). 
- -| Field | Description | Accepted Values | Schema | Default | -| -------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------- | ------ | ------- | -| name | The name to attach to this port, allowing it to be referred to in [policy rules](../../reference/resources/networkpolicy.mdx#entityrule). Names must be unique within an endpoint. | | string | | -| protocol | The protocol of this named port. | `TCP`, `UDP`, `SCTP` | string | | -| port | The workload port number. | `1`-`65535` | int | | - -:::note - -On their own, EndpointPort entries don't result in any change to the connectivity of the port. -They only have an effect if they are referred to in policy. - -::: diff --git a/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx b/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx deleted file mode 100644 index e013a5e9f4..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx +++ /dev/null @@ -1,34 +0,0 @@ -Entity rules specify the attributes of the source or destination of a packet that must match for the rule as a whole -to match. Packets are matched on their IPs and ports. If the rule contains multiple match criteria (for example, an -IP and a port) then all match criteria must match for the rule as a whole to match. - -[Selectors](#selectors) offer a powerful way to select the source or destination to match based on labels. -Selectors can match [workload endpoints](../../reference/resources/workloadendpoint.mdx), host endpoint and -([namespaced](../../reference/resources/networkset.mdx) or -[global](../../reference/resources/globalnetworkset.mdx)) network sets. - -| Field | Description | Accepted Values | Schema | Default | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | ------------------------------------------- | ------- | -| nets | Match packets with IP in any of the listed CIDRs. | List of valid IPv4 CIDRs or list of valid IPv6 CIDRs (IPv4 and IPv6 CIDRs shouldn't be mixed in one rule) | list of cidrs | -| notNets | Negative match on CIDRs. Match packets with IP not in any of the listed CIDRs. | List of valid IPv4 CIDRs or list of valid IPv6 CIDRs (IPv4 and IPv6 CIDRs shouldn't be mixed in one rule) | list of cidrs | -| selector | Positive match on selected endpoints. If a `namespaceSelector` is also defined, the set of endpoints this applies to is limited to the endpoints in the selected namespaces. | Valid selector | [selector](#selectors) | | -| notSelector | Negative match on selected endpoints. If a `namespaceSelector` is also defined, the set of endpoints this applies to is limited to the endpoints in the selected namespaces. | Valid selector | [selector](#selectors) | | -| namespaceSelector | Positive match on selected namespaces. 
If specified, only workload endpoints in the selected Kubernetes namespaces are matched. Matches namespaces based on the labels that have been applied to the namespaces. Defines the scope that selectors will apply to, if not defined then selectors apply to the NetworkPolicy's namespace. Match a specific namespace by name using the `projectcalico.org/name` label. Select the non-namespaced resources like GlobalNetworkSet(s), host endpoints to which this policy applies by using `global()` selector. | Valid selector | [selector](#selectors) | | -| ports | Positive match on the specified ports | | list of [ports](#ports) | | -| notPorts | Negative match on the specified ports | | list of [ports](#ports) | | -| serviceAccounts | Match endpoints running under service accounts. If a `namespaceSelector` is also defined, the set of service accounts this applies to is limited to the service accounts in the selected namespaces. | | [ServiceAccountMatch](#serviceaccountmatch) | | -| services | Match the specified service(s). If specified on egress rule destinations, no other selection criteria can be set. If specified on ingress rule sources, only positive or negative matches on ports can be specified. | | [ServiceMatch](#servicematch) | | - -When using selectors in network policy, remember that selectors only match (known) resources, but _rules_ match -packets. A rule with a selector `all()` won't match "all packets", it will match "packets from all in-scope -endpoints and network sets". To match all packets, do not include a selector in the rule at all. - -:::note - -`notSelector` is somewhat subtle because the `not` in `notSelector` negates the packet match -rather than the selector: - -- `selector: !has(foo)` matches packets from/to endpoints and network sets that do not have the label "foo". -- `notSelector: has(foo)` matches packets from/to **anywhere** (including outside the cluster), **except** traffic from/to endpoints and network sets that have the label "foo". - -::: diff --git a/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx b/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx deleted file mode 100644 index 9439c58242..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx +++ /dev/null @@ -1,19 +0,0 @@ -You should configure a `node` resource for each -host running Felix. In this case, the database is initialized after -creating the first `node` resource. For a deployment that does not include -the {{prodname}}/BGP integration, the specification of a node resource just -requires the name of the node; for most deployments this will be the same as the -hostname. - -```bash -calicoctl create -f - < -EOF -``` - -The Felix logs should transition from periodic notifications -that Felix is in the state `wait-for-ready` to a stream of initialization -messages. diff --git a/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx b/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx deleted file mode 100644 index 613bb97c3d..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx +++ /dev/null @@ -1,23 +0,0 @@ -An HTTPMatch matches attributes of an HTTP request. The presence of an HTTPMatch clause on a Rule will cause that rule to only match HTTP traffic. Other application layer protocols will not match the rule. 
- -Example: - -```yaml -http: - methods: ['GET', 'PUT'] - paths: - - exact: '/projects/calico' - - prefix: '/users' -``` - -| Field | Description | Schema | -| ------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------- | -| methods | Match HTTP methods. Case sensitive. [Standard HTTP method descriptions.](https://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html) | list of strings | -| paths | Match HTTP paths. Case sensitive. | list of [HTTPPathMatch](#httppathmatch) | - -### HTTPPathMatch - -| Syntax | Example | Description | -| ------ | ------------------- | ------------------------------------------------------------------------------- | -| exact | `exact: "/foo/bar"` | Matches the exact path as written, not including the query string or fragments. | -| prefix | `prefix: "/keys"` | Matches any path that begins with the given prefix. | diff --git a/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx b/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx deleted file mode 100644 index 1adb456472..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx +++ /dev/null @@ -1,4 +0,0 @@ -| Field | Description | Accepted Values | Schema | Default | -| ----- | ------------------- | -------------------- | ------- | ------- | -| type | Match on ICMP type. | Can be integer 0-254 | integer | -| code | Match on ICMP code. | Can be integer 0-255 | integer | diff --git a/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx b/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx deleted file mode 100644 index 9cfa2fb904..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx +++ /dev/null @@ -1,6 +0,0 @@ -IPNAT contains a single NAT mapping for a WorkloadEndpoint resource. - -| Field | Description | Accepted Values | Schema | Default | -| ---------- | ------------------------------------------- | ------------------ | ------ | ------- | -| internalIP | The internal IP address of the NAT mapping. | A valid IP address | string | | -| externalIP | The external IP address. | A valid IP address | string | | diff --git a/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx b/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx deleted file mode 100644 index 6dcce7f8fe..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx +++ /dev/null @@ -1,16 +0,0 @@ -## Configuration for etcd authentication - -If your etcd cluster has authentication enabled, you must also configure the -relevant {{prodname}} components with an etcd user name and password. You -can create a single etcd user for {{prodname}} that has permission to read -and write any key beginning with `/calico/`, or you can create specific etcd -users for each component, with more precise permissions. - -This table sets out where to configure each component of {{prodname}} for -OpenStack, and the detailed access permissions that each component needs: - -| Component | Configuration | Access | -| -------------- | -------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | -| Felix | `CALICO_ETCD_USERNAME` and `CALICO_ETCD_PASSWORD` variables in Felix's environment on each compute node. 
| [See here](../../reference/etcd-rbac/calico-etcdv3-paths.mdx#felix-as-a-stand-alone-process) | -| Neutron driver | `etcd_username` and `etcd_password` in `[calico]` section of `/etc/neutron/neutron.conf` on each control node. | [See here](../../reference/etcd-rbac/calico-etcdv3-paths.mdx#openstack-calico-driver-for-neutron) | -| DHCP agent | `etcd_username` and `etcd_password` in `[calico]` section of `/etc/neutron/neutron.conf` on each compute node. | [See here](../../reference/etcd-rbac/calico-etcdv3-paths.mdx#openstack-calico-dhcp-agent) | diff --git a/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx b/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx deleted file mode 100644 index 82f6bc3365..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx +++ /dev/null @@ -1,33 +0,0 @@ -{{prodname}} supports the following syntaxes for expressing ports. -
-| Syntax | Example | Description |
-| --------- | ---------- | ------------------------------------------------------------------- |
-| int | 80 | The exact (numeric) port specified |
-| start:end | 6040:6050 | All (numeric) ports within the range start ≤ x ≤ end |
-| string | named-port | A named port, as defined in the ports list of one or more endpoints |
-
-An individual numeric port may be specified as a YAML/JSON integer. A port range or
-named port must be represented as a string. For example, this would be a valid list of ports:
-
-```yaml
-ports: [8080, '1234:5678', 'named-port']
-```
-
-#### Named ports
-
-Using a named port in an `EntityRule`, instead of a numeric port, gives a layer of indirection,
-allowing for the named port to map to different numeric values for each endpoint.
-
-For example, suppose you have multiple HTTP servers running as workloads; some exposing their HTTP
-port on port 80 and others on port 8080. In each workload, you could create a named port called
-`http-port` that maps to the correct local port. Then, in a rule, you could refer to the name
-`http-port` instead of writing a different rule for each type of server.
-
-:::note
-
-Since each named port may refer to many endpoints (and {{prodname}} has to expand a named port into
-a set of endpoint/port combinations), using a named port is considerably more expensive in terms
-of CPU than using a simple numeric port. We recommend that they are used sparingly, only where
-the extra indirection is required.
-
-::: diff --git a/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx b/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx deleted file mode 100644 index fdbdc317fb..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx +++ /dev/null @@ -1,46 +0,0 @@ -A single rule matches a set of packets and applies some action to them. When multiple rules are specified, they -are executed in order. -
-| Field | Description | Accepted Values | Schema | Default |
-| ----------- | ------------------------------------------------------------------------------------------ | ------------------------------------------------------------ | ----------------------------- | ------- |
-| metadata | Per-rule metadata. | | [RuleMetadata](#rulemetadata) | |
-| action | Action to perform when matching this rule. | `Allow`, `Deny`, `Log`, `Pass` | string | |
-| protocol | Positive protocol match. | `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | |
-| notProtocol | Negative protocol match. 
| `TCP`, `UDP`, `ICMP`, `ICMPv6`, `SCTP`, `UDPLite`, `1`-`255` | string \| integer | | -| icmp | ICMP match criteria. | | [ICMP](#icmp) | | -| notICMP | Negative match on ICMP. | | [ICMP](#icmp) | | -| ipVersion | Positive IP version match. | `4`, `6` | integer | | -| source | Source match parameters. | | [EntityRule](#entityrule) | | -| destination | Destination match parameters. | | [EntityRule](#entityrule) | | -| http | Match HTTP request parameters. Application layer policy must be enabled to use this field. | | [HTTPMatch](#httpmatch) | | - -After a `Log` action, processing continues with the next rule; `Allow` and `Deny` are immediate -and final and no further rules are processed. - -An `action` of `Pass` in a `NetworkPolicy` or `GlobalNetworkPolicy` will skip over the remaining policies and jump to the -first [profile](../../reference/resources/profile.mdx) assigned to the endpoint, applying the policy configured in the -profile; if there are no Profiles configured for the endpoint the default applied action is `Deny`. - -### RuleMetadata - -Metadata associated with a specific rule (rather than the policy as a whole). The contents of the metadata does not affect how a rule is interpreted or enforced; it is -simply a way to store additional information for use by operators or applications that interact with {{prodname}}. - -| Field | Description | Schema | Default | -| ----------- | ----------------------------------- | ----------------------- | ------- | -| annotations | Arbitrary non-identifying metadata. | map of string to string | | - -Example: - -```yaml -metadata: - annotations: - app: database - owner: devops -``` - -Annotations follow the -[same rules as Kubernetes for valid syntax and character set](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/#syntax-and-character-set). - -On Linux with the iptables dataplane, rule annotations are rendered as comments in the form `-m comment --comment "="` on the iptables rule(s) that correspond -to the {{prodname}} rule. diff --git a/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx b/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx deleted file mode 100644 index 9d9fbc8c54..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx +++ /dev/null @@ -1,20 +0,0 @@ -Understanding scopes and the `all()` and `global()` operators: selectors have a scope of resources -that they are matched against, which depends on the context in which they are used. For example: - -- The `nodeSelector` in an `IPPool` selects over `Node` resources. - -- The top-level selector in a `NetworkPolicy` selects over the workloads _in the same namespace_ as the - `NetworkPolicy`. -- The top-level selector in a `GlobalNetworkPolicy` doesn't have the same restriction, it selects over all endpoints - including namespaced `WorkloadEndpoint`s and non-namespaced `HostEndpoint`s. - -- The `namespaceSelector` in a `NetworkPolicy` (or `GlobalNetworkPolicy`) _rule_ selects over the labels on namespaces - rather than workloads. - -- The `namespaceSelector` determines the scope of the accompanying `selector` in the entity rule. If no `namespaceSelector` - is present then the rule's `selector` matches the default scope for that type of policy. 
(This is the same namespace - for `NetworkPolicy` and all endpoints/network sets for `GlobalNetworkPolicy`)
- The `global()` operator can be used (only) in a `namespaceSelector` to change the scope of the main `selector` to
  include non-namespaced resources such as [GlobalNetworkSet](../../reference/resources/globalnetworkset.mdx).
  This allows namespaced `NetworkPolicy` resources to refer to global non-namespaced resources, which would otherwise
  be impossible. diff --git a/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx b/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx deleted file mode 100644 index fca343f628..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx +++ /dev/null @@ -1,50 +0,0 @@ -A label selector is an expression which either matches or does not match a resource based on its labels. -
-{{prodname}} label selectors support a number of operators, which can be combined into larger expressions
-using the boolean operators and parentheses.
-
-| Expression | Meaning |
-| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| **Logical operators** |
-| `( <expr> )` | Matches if and only if `<expr>` matches. (Parentheses are used for grouping expressions.) |
-| `! <expr>` | Matches if and only if `<expr>` does not match. **Tip:** `!` is a special character at the start of a YAML string, if you need to use `!` at the start of a YAML string, enclose the string in quotes. |
-| `<expr1> && <expr2>` | "And": matches if and only if both `<expr1>`, and, `<expr2>` match |
-| `<expr1> \|\| <expr2>` | "Or": matches if and only if either `<expr1>`, or, `<expr2>` matches. |
-| **Match operators** |
-| `all()` | Match all in-scope resources. To match _no_ resources, combine this operator with `!` to form `!all()`. |
-| `global()` | Match all non-namespaced resources. Useful in a `namespaceSelector` to select global resources such as global network sets. |
-| `k == 'v'` | Matches resources with the label 'k' and value 'v'. |
-| `k != 'v'` | Matches resources without label 'k' or with label 'k' and value _not_ equal to `v` |
-| `has(k)` | Matches resources with label 'k', independent of value. To match pods that do not have label `k`, combine this operator with `!` to form `!has(k)` |
-| `k in { 'v1', 'v2' }` | Matches resources with label 'k' and value in the given set |
-| `k not in { 'v1', 'v2' }` | Matches resources without label 'k' or with label 'k' and value _not_ in the given set |
-| `k contains 's'` | Matches resources with label 'k' and value containing the substring 's' |
-| `k starts with 's'` | Matches resources with label 'k' and value starting with the substring 's' |
-| `k ends with 's'` | Matches resources with label 'k' and value ending with the substring 's' |
-
-Operators have the following precedence:
-
-- **Highest**: all the match operators
-- Parentheses `( ... )`
-- Negation with `!`
-- Conjunction with `&&`
-- **Lowest**: Disjunction with `||`
-
-For example, the expression
-
-```
-! has(my-label) || my-label starts with 'prod' && role in {'frontend','business'}
-```
-
-Would be "bracketed" like this:
-
-```
-(!(has(my-label))) || ((my-label starts with 'prod') && (role in {'frontend','business'}))
-```
-
-It would match:
-
-- Any resource that did not have label "my-label". 
-- Any resource that both: - - Has a value for `my-label` that starts with "prod", and, - - Has a role label with value either "frontend", or "business". diff --git a/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx b/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx deleted file mode 100644 index 66cd92ee7c..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx +++ /dev/null @@ -1,6 +0,0 @@ -A ServiceAccountMatch matches service accounts in an EntityRule. - -| Field | Description | Schema | -| -------- | ------------------------------- | ---------------------- | -| names | Match service accounts by name | list of strings | -| selector | Match service accounts by label | [selector](#selectors) | diff --git a/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx b/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx deleted file mode 100644 index 2d47fed02c..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx +++ /dev/null @@ -1,6 +0,0 @@ -A ServiceMatch matches a service in an EntityRule. - -| Field | Description | Schema | -| --------- | ------------------------ | ------ | -| name | The service's name. | string | -| namespace | The service's namespace. | string | diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/.gitkeep b/calico_versioned_docs/version-3.25/_includes/release-notes/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx b/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx deleted file mode 100644 index b35cb7284c..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.0-release-notes.mdx +++ /dev/null @@ -1,140 +0,0 @@ -09 Jan 2023 - -### eBPF Dataplane Stability: Connect Time Load Balancing (CTLB) - -In certain scenarios, Calico would not update rapidly changing pods and IPs properly. We have added -some large changes to the eBPF dataplane to ensure that connect time load balancing works -in larger, rapidly changing environments. - -Pull Requests: - -- ebpf: ipv4 and ipv6 code separated to different object files so the v6 code gets never loaded outside tests. [calico #7093](https://github.com/projectcalico/calico/pull/7093) (@tomastigera) -- ebpf: CTLB resolves service when ipv4 is masked as ipv6. Commonly happens with grpc. [calico #7087](https://github.com/projectcalico/calico/pull/7087) (@tomastigera) -- ebpf: we can apply the CTLB-turned-off workaround just to UDP [calico #6783](https://github.com/projectcalico/calico/pull/6783) (@tomastigera) -- ebpf: host can accesses services without CTLB - gated feature [calico #6527](https://github.com/projectcalico/calico/pull/6527) (@tomastigera) - -### Improvements for clusters running at high scale - -This release includes several enhancements to Typha, Calico’s caching API server proxy, targeting high-scale clusters. -Typha acts as a proxy for the components within calico-node (such as Felix) when they watch resources in the -Kubernetes API server. This both reduces load on the API server and (by filtering out updates that Calico doesn't care about) -it reduces load in calico-node. - -- Typha now supports graceful shutdown. 
Rather than disconnecting all clients and shutting down immediately, it will - observe the `terminationGracePeriodSeconds` (which can now be set via the operator’s `Installation` resource). Typha - will disconnect clients gradually over the graceful shutdown window. This reduces disruption by avoiding restarting - many calico-node components at once. - - Since this is the first release with this feature, the benefit will only be seen when doing an upgrade _from_ this release. - -- Typha now supports compression on its protocol; this gives a 5:1 reduction in bandwidth use and snapshot data size. - Compression is automatically enabled when a supporting client (i.e. clients from this release onward) connects. -- Typha now shares computed (and compressed) snapshots between clients that connect at approximately the same time. - This significantly reduces CPU usage and the time to service all clients when many clients connect at once. In a - cluster with 150k Pods, generating a snapshot can take 2-3s of CPU time so, if 100 clients connect at once, there - can be a 200-300 second saving in CPU used, and a corresponding increase in throughput. Typha has prometheus metrics - to monitor the size of snapshots (typha_snapshot_raw_bytes / typha_snapshot_compressed_bytes) and the number of - snapshots that are reused for more than one client (typha_snapshots_reused). -- Typha now exports a Prometheus metric (typha_cache_size) for the size of its internal cache. -- Typha's Prometheus metrics have been improved and split by client type. Previously the metrics would mix - "high traffic" clients (such as Felix), with "low traffic" clients, making the metrics much less useful. - -### Bug fixes - -#### General - -- Fix incorrect cleanup in the service policy index after having both ingress and egress rules that reference the same service, resulting in missed IP set updates after one rule was deactivated. [calico #7148](https://github.com/projectcalico/calico/pull/7148) (@fasaxc) -- Fix panic in calico-node when invalid spoofed IP range provided on a pod. [calico #7076](https://github.com/projectcalico/calico/pull/7076) (@caseydavenport) -- fixed felix docs for bpf config options [calico #7065](https://github.com/projectcalico/calico/pull/7065) (@tomastigera) -- Fix missing nsswitch files in Typha causing localhost lookup fails [calico #6971](https://github.com/projectcalico/calico/pull/6971) (@wdoekes) -- Fix that Calico would try to use the IPV6 VXLAN or Wireguard tunnel devices for its BGP connections. [calico #6929](https://github.com/projectcalico/calico/pull/6929) (@coutinhop) -- Fix that Calico would try to use the VXLAN tunnel device for its BGP connections. [calico #6902](https://github.com/projectcalico/calico/pull/6902) (@caseydavenport) -- Add missing Auto option for IptablesBackend FelixConfiguration field [calico #6871](https://github.com/projectcalico/calico/pull/6871) (@huiyizzz) -- Fix an issue that caused annotations and labels to be overwritten during a calicoctl patch command [calico #6791](https://github.com/projectcalico/calico/pull/6791) (@mgleung) -- Fixed SyncLabels validation for Kubernetes datastore. [calico #6786](https://github.com/projectcalico/calico/pull/6786) (@huiyizzz) -- Fix issues with OCP installs using the wrong operator manifest. [calico #6724](https://github.com/projectcalico/calico/pull/6724) (@mgleung) -- Fix bug in IPv6 router ID calculation on IPv6 single-stack clusters that resulted in invalid router IDs being calculated. 
Note that this change will result in new router IDs being used for some IPv6 single-stack nodes. [calico #6674](https://github.com/projectcalico/calico/pull/6674) (@ramanujadasu) -- Fix that `calicoctl ipam release` could only release IPAM handles when running in etcd mode. [calico #6650](https://github.com/projectcalico/calico/pull/6650) (@fasaxc) -- Fix issue in L3RouteResolver CIDRTrie which could result in crashes when the IPv6 trie had a node with a /63 prefix. [calico #6532](https://github.com/projectcalico/calico/pull/6532) (@coutinhop) -- Fix nil error logged from kube-controllers health reporter [calico #6513](https://github.com/projectcalico/calico/pull/6513) (@caseydavenport) -- Fix that kube-controllers health checks didn't include a timeout on HTTP calls [calico #6513](https://github.com/projectcalico/calico/pull/6513) (@caseydavenport) -- Set IPIPMode and VXLANMode to the default "Never" if they are empty strings in IPPools. [calico #6498](https://github.com/projectcalico/calico/pull/6498) (@coutinhop) -- Fix that single-IP entries on BGPConfiguration LoadBalancerIPs were not advertised according to external traffic policy. [calico #6282](https://github.com/projectcalico/calico/pull/6282) (@mtryfoss) -- fix: ErrorActionPreference must continue for kubectl commands Issue #6127 [calico #6257](https://github.com/projectcalico/calico/pull/6257) (@chrisjohnson00) - -#### eBPF - -- ebpf: fix error setting accept_local - device may get stuck dirty [calico #7071](https://github.com/projectcalico/calico/pull/7071) (@tomastigera) -- ebpf: no src fixup on host iface for traffic returning from pod to the nodeport tunnel [calico #7039](https://github.com/projectcalico/calico/pull/7039) (@tomastigera) -- ebpf: XDP (notrack) policy debug output is removed/cleaned up when XDP program is removed (fix) [calico #6994](https://github.com/projectcalico/calico/pull/6994) (@tomastigera) -- ebpf: fixes ifstate leak when devices go down [calico #6946](https://github.com/projectcalico/calico/pull/6946) (@tomastigera) - -#### Windows - -- Fixed issue when Calico Windows hostprocess installation would fail to clean up a previous manual install of Calico Windows. [calico #6952](https://github.com/projectcalico/calico/pull/6952) (@coutinhop) -- Fix issues with the windows node names in GCE [calico #6470](https://github.com/projectcalico/calico/pull/6470) (@lmm) - -#### Wireguard - -- Limit rate of logging 'Wireguard is not supported' to fix log spam issues. [calico #6534](https://github.com/projectcalico/calico/pull/6534) (@coutinhop) - -### Other changes - -#### General - -- Felix now supports overriding the timeouts of its internal readiness/liveness watchdog. This is useful for dealing with issues "in prod" without needing a new release. The timeouts have also been tuned to reduce false positives. [calico #7061](https://github.com/projectcalico/calico/pull/7061) (@fasaxc) -- Typha now shares snapshots between clients that connect at roughly the same time. This dramatically reduces load when many clients connect at once. [calico #7047](https://github.com/projectcalico/calico/pull/7047) (@fasaxc) -- By default, skip bridge interface created by `docker network create` command in IP auto-detection [calico #7045](https://github.com/projectcalico/calico/pull/7045) (@masap) -- The Typha protocol now supports compression. This is enabled automatically if client and server both support it. 
[calico #7043](https://github.com/projectcalico/calico/pull/7043) (@fasaxc) -- Add ignorable interfaces via the BGPConfiguration API [calico #7006](https://github.com/projectcalico/calico/pull/7006) (@huiyizzz) -- Typha now supports graceful shut down, disconnecting calico-node pods at a configured rate instead of all at once. [calico #6973](https://github.com/projectcalico/calico/pull/6973) (@fasaxc) -- Update installation documentation for AWS to include information regarding and links for CSI driver installation [calico #6967](https://github.com/projectcalico/calico/pull/6967) (@Josh-Tigera) -- Update golang from 1.18.7 to 1.18.8 to avoid CVEs. [calico #6961](https://github.com/projectcalico/calico/pull/6961) (@Behnam-Shobiri) -- By default, skip 'podman' interface in IP auto-detection [calico #6950](https://github.com/projectcalico/calico/pull/6950) (@OrvilleQ) -- By default, skip 'nodelocaldns' interface in IP auto-detection [calico #6942](https://github.com/projectcalico/calico/pull/6942) (@cyclinder) -- ebpf: faster program loading for workload endpoint - unused programs not loaded. [calico #6933](https://github.com/projectcalico/calico/pull/6933) (@tomastigera) -- Remove problematic terminology from the codebase. [calico #6912](https://github.com/projectcalico/calico/pull/6912) (@fasaxc) -- Update Istio support to include Istio v1.15.2 [calico #6890](https://github.com/projectcalico/calico/pull/6890) (@frozenprocess) -- Add generalized TTL security mechanism (GTSM) via BGPPeer API [calico #6862](https://github.com/projectcalico/calico/pull/6862) (@Josh-Tigera) -- Retain OpenSSL FIPS dependent files in calico-node image. [calico #6852](https://github.com/projectcalico/calico/pull/6852) (@hjiawei) -- Disable VXLAN checksum offload by default for all kernels. If this was fixed, it has since been regressed. [calico #6842](https://github.com/projectcalico/calico/pull/6842) (@fasaxc) -- Improve formatting of logged-out health reports from components such as Felix. [calico #6833](https://github.com/projectcalico/calico/pull/6833) (@fasaxc) -- Update golang to 1.18.7 to avoid new CVEs. [calico #6824](https://github.com/projectcalico/calico/pull/6824) (@Behnam-Shobiri) -- Updated documentation list of images to pull for deploying from private registry (now includes node-driver-registrar) [calico #6812](https://github.com/projectcalico/calico/pull/6812) (@Josh-Tigera) -- Match full interface names in IP auto-detection default exclude list. [calico #6760](https://github.com/projectcalico/calico/pull/6760) (@neoaggelos) -- Update multiple golang dependencies. [calico #6719](https://github.com/projectcalico/calico/pull/6719) (@Behnam-Shobiri) -- Update the go version used to build the binaries from 1.18.5 to 1.18.6 [calico #6717](https://github.com/projectcalico/calico/pull/6717) (@Behnam-Shobiri) -- Calico now uses a faster JSON parsing library; this reduces CPU load and improves start-up latency. [calico #6705](https://github.com/projectcalico/calico/pull/6705) (@fasaxc) -- Reduce parsing overhead when parsing key/value pairs from Typha. [calico #6703](https://github.com/projectcalico/calico/pull/6703) (@fasaxc) -- Many of Typha's Prometheus metrics are now split by syncer (client) type, represented by a label "syncer" on the metrics. This prevents cross-talk where the syncers would all share the same metrics and the last writer to the metric would "win". 
[calico #6675](https://github.com/projectcalico/calico/pull/6675) (@fasaxc) -- The vxlanEnabled attribute from FelixConfiguration is now ignored for IPv6 VXLAN pools, allowing VXLAN to have IPv4 enabled independently from IPv6. [calico #6671](https://github.com/projectcalico/calico/pull/6671) (@muff1nman) -- Typha now uses a B-tree for its internal cache, which allows it to export a Prometheus metric, typha_snapshot_size, that gives the total size of its current snapshot of the Calico datastore. [calico #6666](https://github.com/projectcalico/calico/pull/6666) (@fasaxc) -- Use exponential backoff for kube-controllers health check timeout, retry sooner if failed. [calico #6610](https://github.com/projectcalico/calico/pull/6610) (@caseydavenport) -- Bump K8S_VERSION and KUBECTL_VERSION to v1.24.3 in metadata.mk [calico #6606](https://github.com/projectcalico/calico/pull/6606) (@coutinhop) -- Update Installation CRD to include new CSI changes introduced by recent operator API changes. [calico #6596](https://github.com/projectcalico/calico/pull/6596) (@Josh-Tigera) -- Helm: imagePullSecrets now also applied to tigera-operator serviceaccount [calico #6591](https://github.com/projectcalico/calico/pull/6591) (@tamcore) -- Retry kube-controllers initialization on failure [calico #6566](https://github.com/projectcalico/calico/pull/6566) (@tmjd) -- Update the base images to alpine 3.16 for the flexvolume and CSI driver [calico #6559](https://github.com/projectcalico/calico/pull/6559) (@mgleung) -- Windows quickstart install script creates calico service account token secret if missing [calico #6464](https://github.com/projectcalico/calico/pull/6464) (@lmm) -- Updating the dependencies - to avoid indirect vulnerabilities (CVE) detection from scanners. [calico #6452](https://github.com/projectcalico/calico/pull/6452) (@Behnam-Shobiri) -- added FeatureGates to Felix [calico #6381](https://github.com/projectcalico/calico/pull/6381) (@tomastigera) -- eBPF: Add BPF counters to XDP programs, and also load XDP programs using Libbpf instead of iproute2. [calico #6371](https://github.com/projectcalico/calico/pull/6371) (@mazdakn) -- The arm64 image of calico-kube-controllers now runs as non-root by default (similar to the amd64 image). [calico #6346](https://github.com/projectcalico/calico/pull/6346) (@ialidzhikov) - -#### eBPF - -- ebpf: Include enPxxxxxx in the default BPFDataIfacePattern [calico #7077](https://github.com/projectcalico/calico/pull/7077) (@TrevorTaoARM) -- ebpf: cleanup previously attached programs when BPFDataIfacePattern changes. [calico #7008](https://github.com/projectcalico/calico/pull/7008) (@tomastigera) -- ebpf : BPFDisableLinuxConntrack added to FelixConfiguration resource. [calico #6641](https://github.com/projectcalico/calico/pull/6641) (@mazdakn) -- ebpf: New felix config bpfL3IfacePattern allows to specify non calico L3 devices such as wireguard, vxlan. [calico #6612](https://github.com/projectcalico/calico/pull/6612) (@sridhartigera) - -#### Windows - -- Update Windows NSSM version [calico #6861](https://github.com/projectcalico/calico/pull/6861) (@song-jiang) -- windows: ensure calico-managed kubelet starts after the calico network has been initialized [calico #6656](https://github.com/projectcalico/calico/pull/6656) (@vitaliy-leschenko) - -#### OpenStack - -- Calico for OpenStack: remove iptables programming by the DHCP agent that is no longer needed, and that was increasing the need for Felix to resync Calico's iptables programming. Existing users will see issues - i.e. 
a VM failing to learn its IP address at boot time - if their VM OS is old enough to have unfixed DHCP client software. In that case the remedy is to update the VM OS. For example, in Tigera's own testing, we updated from CirrOS 0.3.4 to CirrOS 0.6.0. [calico #6857](https://github.com/projectcalico/calico/pull/6857) (@tj90241) -- Calico for OpenStack: prime the project (aka tenant) data cache on Neutron server startup [calico #6839](https://github.com/projectcalico/calico/pull/6839) (@tj90241) -- Allow Calico to set MTU in OpenStack [calico #6725](https://github.com/projectcalico/calico/pull/6725) (@nelljerram) diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx b/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx deleted file mode 100644 index 6d2341fc0d..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.1-release-notes.mdx +++ /dev/null @@ -1,16 +0,0 @@ -31 Mar 2023 - -### Bug fixes - - - Prevents Node kube-controller's internal pod cache from getting out-of-sync thus leaking memory. [calico #7503](https://github.com/projectcalico/calico/pull/7503) (@dilyevsky) - - Fix a panic in BPF mode when iterating over a per-CPU map with Debug enabled. [calico #7379](https://github.com/projectcalico/calico/pull/7379) (@fasaxc) - - Fix that the tunnel IP allocator did not respond to changes in the IP pool's allowedUses field. [calico #7360](https://github.com/projectcalico/calico/pull/7360) (@fasaxc) - - s390x: fix image mislabel in cni, typha and kube-controllers [calico #7315](https://github.com/projectcalico/calico/pull/7315) (@huoqifeng) - - Fix generation of `operator-crds.yaml` manifest. [calico #7217](https://github.com/projectcalico/calico/pull/7217) (@caseydavenport) - -### Other changes - - - ebpf: Jumpmap version incremented to prevent failures when upgrading from earlier calico versions [calico #7487](https://github.com/projectcalico/calico/pull/7487) (@tomastigera) - - Performance: on kernel 4.10+, use kernel-side route filtering when listing routes. Dramatically reduces CPU usage (and garbage collection) on systems with many interfaces and/or routes. [calico #7381](https://github.com/projectcalico/calico/pull/7381) (@fasaxc) - - ocp.tgz now hosted on GitHub [calico #7214](https://github.com/projectcalico/calico/pull/7214) (@caseydavenport) - - Enable s390x architecture support in 3.25 [calico #7210](https://github.com/projectcalico/calico/pull/7210) (@huoqifeng) diff --git a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx b/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx deleted file mode 100644 index 2269ba29a1..0000000000 --- a/calico_versioned_docs/version-3.25/_includes/release-notes/_v3.25.2-release-notes.mdx +++ /dev/null @@ -1,24 +0,0 @@ -6 September 2023 - -#### Bug fixes - - - Typha: move TLS handshake to per-connection goroutine to prevent main goroutine from stalling on an unclean handshake. [calico #7976](https://github.com/projectcalico/calico/pull/7976) (@fasaxc) - - Fix panic when running 'calicoctl get nodes' when ASNumber was not present in the default BGPConfiguration. 
[calico #7861](https://github.com/projectcalico/calico/pull/7861) (@coutinhop) - - ebpf: fixes felix panic upon restart in debug mode when there are existing policy counters [calico #7798](https://github.com/projectcalico/calico/pull/7798) (@tomastigera) - - ebpf: fix applyOnforward=false in global policies [calico #7725](https://github.com/projectcalico/calico/pull/7725) (@tomastigera) - - Update pin to use fixed calico/bird image to fix node ST failures. [calico #7563](https://github.com/projectcalico/calico/pull/7563) (@coutinhop) - - Prevents Node kube-controller's internal pod cache from getting out-of-sync thus leaking memory. [calico #7503](https://github.com/projectcalico/calico/pull/7503) (@dilyevsky) - - Fix the auto iptables detection if ip_tables.ko preloaded on RHEL/CentOS 8 [calico #7460](https://github.com/projectcalico/calico/pull/7460) (@yankay) - -#### Other changes - - - Update Calico VPP to v3.25.1 [calico #7535](https://github.com/projectcalico/calico/pull/7535) (@sknat) - - Remove usage of deprecated '--logtostderr' command line flag. [calico #7515](https://github.com/projectcalico/calico/pull/7515) (@coutinhop) - -#### Known issues - -- Calico panics if kube-proxy or other components are using native `nftables` rules instead of the `iptables-nft` compatibility shim. - - Until Calico supports native nftables mode, we recommend that you continue to use the iptables-nft compatibility layer for all components. (The compatibility layer was the only option before Kubernetes v1.29 added alpha-level `nftables` support.) - - Do not run Calico in "legacy" iptables mode on a system that is also using `nftables`. Although this combination does not panic or fail (at least on kernels that support both), the interaction between `iptables` "legacy" mode and `nftables` is confusing: both `iptables` and `nftables` rules can be executed on the same packet, leading to policy verdicts being "overturned". Note that this issue applies to all previous versions of {{prodname}}. \ No newline at end of file diff --git a/calico_versioned_docs/version-3.25/about/about-ebpf.mdx b/calico_versioned_docs/version-3.25/about/about-ebpf.mdx deleted file mode 100644 index e764291b4b..0000000000 --- a/calico_versioned_docs/version-3.25/about/about-ebpf.mdx +++ /dev/null @@ -1,163 +0,0 @@ ---- -description: Learn about eBPF! ---- - -# About eBPF - - - -:::note - -This guide provides optional background education, including -education that is not specific to {{prodname}}. - -::: - -eBPF is a Linux kernel feature that allows fast yet safe mini-programs to be loaded into the kernel to -customise its operation. - -In this guide you will learn: - -- General background on eBPF. -- Various uses of eBPF. -- How {{prodname}} uses eBPF in the eBPF dataplane. - -## What is eBPF? - -eBPF is a virtual machine embedded within the Linux kernel. It allows small programs to be loaded into the kernel, -and attached to hooks, which are triggered when some event occurs. This allows the behaviour of the kernel to be -(sometimes heavily) customised. While the eBPF virtual machine is the same for each type of hook, the capabilities -of the hooks vary considerably. Because loading programs into the kernel could be dangerous, the kernel runs all -programs through a very strict static verifier. The verifier sandboxes the program, ensuring it can only access -allowed parts of memory and ensuring that it must terminate quickly. - -## Why is it called eBPF? - -eBPF stands for "extended Berkeley Packet Filter".
The Berkeley Packet Filter was an earlier, more specialised -virtual machine that was tailored for filtering packets. Tools such as `tcpdump` use this "classic" BPF VM to select -packets that should be sent to userspace for analysis. eBPF is a considerably extended version of BPF that -is suitable for general purpose use inside the kernel. While the name has stuck, eBPF can be used for a lot more -than just packet filtering. - -## What can eBPF do? - -### Types of eBPF program - -There are several classes of hooks to which eBPF programs can be attached within the kernel. The capabilities of an -eBPF program depend hugely on the hook to which it is attached: - -- **Tracing** programs can be attached to a significant proportion of the functions in the kernel. Tracing - programs are useful for collecting statistics and deep-dive debugging of the kernel. _Most_ tracing hooks only allow - read-only access to the data that the function is processing but there are some that allow data to be modified. - The {{prodname}} team use tracing programs to help debug {{prodname}} during development; for example, - to figure out why the kernel unexpectedly dropped a packet. - -- **Traffic Control** (`tc`) programs can be attached at ingress and egress to a given network device. The kernel - executes the programs once for each packet. Since the hooks are for packet processing, the kernel allows - the programs to modify or extend the packet, drop the packet, mark it for queueing, or redirect the packet to - another interface. {{prodname}}'s eBPF dataplane is based on this type of hook; we use tc programs to load - balance Kubernetes services, to implement network policy, and to create a fast-path for traffic of established - connections. - -- **XDP**, or "eXpress Data Path", is actually the name of an eBPF hook. Each network device has an XDP ingress hook - that is triggered once for each incoming packet before the kernel allocates a socket buffer for the packet. XDP - can give outstanding performance for use cases such as DoS protection (as supported in {{prodname}}'s standard Linux - dataplane) and ingress load balancing (as used in Facebook's Katran). The downside of XDP is that it requires - network device driver support to get good performance. XDP isn't sufficient on its own to implement all of the logic - needed for Kubernetes pod networking, but a combination of XDP and Traffic Control hooks works well. - -- Several types of **socket** programs hook into various operations on sockets, allowing the eBPF program to, for - example, change the destination IP of a newly-created socket, or force a socket to bind to the "correct" source - IP address. {{prodname}} uses such programs to do connect-time load balancing of Kubernetes Services; this - reduces overhead because there is no [DNAT](about-networking.mdx#NAT) on the packet processing path. - -- There are various security-related hooks that allow for program behaviour to be policed in various ways. For - example, the **seccomp** hooks allow for syscalls to be policed in fine-grained ways. - -- And... probably a few more hooks by the time you read this; eBPF is under heavy development in the kernel. - -The kernel exposes the capabilities of each hook via "helper functions". For example, the `tc` hook has a helper -function to resize the packet, but that helper would not be available in a tracing hook.
One of the challenges of -working with eBPF is that different kernel versions support different helpers and lack of a helper can make it -impossible to implement a particular feature. - -### BPF maps - -Programs attached to eBPF hooks are able to access BPF "maps". BPF maps have two main uses: - -- They allow BPF programs to store and retrieve long-lived data. - -- They allow communication between BPF programs and user-space programs. BPF programs can read data that was written - by userspace and vice versa. - -There are many types of BPF maps, including some special types that allow jumping between programs, and some that act -as queues and stacks rather than strictly as key/value maps. {{prodname}} uses maps to keep track of active -connections, and to configure the BPF programs with policy and service NAT information. Since map accesses can be -relatively expensive, {{prodname}} aims to do a single map lookup only for each packet on an established flow. - -The contents of BPF maps can be inspected using the command-line tool, `bpftool`, which is provided with the kernel. - -## {{prodname}}'s eBPF dataplane - -{{prodname}}'s eBPF dataplane is an alternative to our standard Linux dataplane (which is iptables based). -While the standard dataplane focuses on compatibility by inter-working with kube-proxy, and your own iptables rules, -the eBPF dataplane focuses on performance, latency, and improving the user experience with features that aren't possible -in the standard dataplane. As part of that, the eBPF dataplane replaces kube-proxy with an eBPF implementation. -The main "user experience" feature is to preserve the source IP of traffic from outside the cluster when traffic hits a -NodePort; this makes your server-side logs and network policy much more useful on that path.
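For orientation, here is a minimal sketch of how the eBPF dataplane is typically switched on in an operator-managed cluster. It is illustrative rather than the full procedure: the `linuxDataplane` field belongs to the operator's `Installation` API, and the "Enable the eBPF dataplane" guide referenced below covers the prerequisites (kernel version, connecting directly to the API server, disabling kube-proxy).

```yaml
# Illustrative sketch only: select the eBPF dataplane via the operator's
# Installation resource. See the "Enable the eBPF dataplane" guide for the
# full procedure and prerequisites.
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    linuxDataplane: BPF # the default is Iptables
```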
- -### Feature comparison - -While the eBPF dataplane has some features that the standard Linux dataplane lacks, the reverse is also true: - -| Factor | Standard Linux Dataplane | eBPF dataplane | -| ------------------------------------ | ----------------------------------------------------- | -------------------------------------------------------------- | -| Throughput | Designed for 10GBit+ | Designed for 40GBit+ | -| First packet latency | Low (kube-proxy service latency is bigger factor) | Lower | -| Subsequent packet latency | Low | Lower | -| Preserves source IP within cluster | Yes | Yes | -| Preserves external source IP | Only with `externalTrafficPolicy: Local` | Yes | -| Direct Server Return | Not supported | Supported (requires compatible underlying network) | -| Connection tracking | Linux kernel's conntrack table (size can be adjusted) | BPF map (fixed size) | -| Policy rules | Mapped to iptables rules | Mapped to BPF instructions | -| Policy selectors | Mapped to IP sets | Mapped to BPF maps | -| Kubernetes services | kube-proxy iptables or IPVS mode | BPF program and maps | -| IPIP | Supported | Supported (no performance advantage due to kernel limitations) | -| VXLAN | Supported | Supported | -| Wireguard | Supported (IPv4 and IPv6) | Supported (IPv4) | -| Other routing | Supported | Supported | -| Supports third party CNI plugins | Yes (compatible plugins only) | Yes (compatible plugins only) | -| Compatible with other iptables rules | Yes (can write rules above or below other rules) | Partial; iptables bypassed for workload traffic | -| Host endpoint policy | Supported | Supported | -| Enterprise version | Available | Available | -| XDP DoS Protection | Supported | Supported | -| IPv6 | Supported | Not supported (yet) | - -### Architecture overview - -{{prodname}}'s eBPF dataplane attaches eBPF programs to the `tc` hooks on each {{prodname}} interface as -well as your data and tunnel interfaces. This allows {{prodname}} to spot workload packets early and handle them -through a fast-path that bypasses iptables and other packet processing that the kernel would normally do. - -![Diagram showing the packet path for pod-to-pod networking; a BPF program is attached to the client pod's veth interface; it does a conntrack lookup in a BPF map, and forwards the packet to the second pod directly, bypassing iptables](/img/calico/bpf-pod-to-pod.svg 'Pod-to-pod packet path with eBPF enabled') - -The logic to implement load balancing and packet parsing is pre-compiled ahead of time and relies on a set of BPF -maps to store the NAT frontend and backend information. One map stores the metadata of the service, allowing -for `externalTrafficPolicy` and "sticky" services to be honoured. A second map stores the IPs of the backing pods. - -In eBPF mode, {{prodname}} converts your policy into optimised eBPF bytecode, using BPF maps to store the IP sets -matched by policy selectors. - -![Detail of BPF program showing that packets are sent to a separate (generated) policy program,](/img/calico/bpf-policy.svg 'Expanded view of tc program showing policy.') - -To improve performance for services, {{prodname}} also does connect-time load balancing by hooking into the -socket BPF hooks. When a program tries to connect to a Kubernetes service, {{prodname}} intercepts the connection -attempt and configures the socket to connect directly to the backend pod's IP instead. This removes _all_ -NAT overhead from service connections. 
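Connect-time load balancing is enabled by default when the eBPF dataplane is on. As a hedged sketch (the field name is taken from the FelixConfiguration reference for this release; verify it against your version), the behaviour is controlled like this:

```yaml
# Sketch: connect-time load balancing is on by default in eBPF mode; this
# FelixConfiguration field allows it to be disabled, e.g. for debugging.
apiVersion: projectcalico.org/v3
kind: FelixConfiguration
metadata:
  name: default
spec:
  bpfConnectTimeLoadBalancingEnabled: true
```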
- -![Diagram showing BPF program attached to socket connect call; it does NAT at connect time,](/img/calico/bpf-connect-time.svg 'BPF program attached to socket connect call.') - -## Additional resources - -- For more information and performance metrics for the eBPF dataplane, see the [announcement blog post](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/). -- If you'd like to try eBPF mode in your Kubernetes cluster, follow the [Enable the eBPF dataplane](../operations/ebpf/enabling-ebpf.mdx) guide. diff --git a/calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx b/calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx deleted file mode 100644 index f8257baedf..0000000000 --- a/calico_versioned_docs/version-3.25/about/about-k8s-networking.mdx +++ /dev/null @@ -1,132 +0,0 @@ ---- -description: Learn about Kubernetes networking! ---- - -# About Kubernetes Networking - - - -:::note - -This guide provides optional background education, not specific to {{prodname}}. - -::: - -Kubernetes defines a network model that helps provide simplicity and consistency across a range of networking -environments and network implementations. The Kubernetes network model provides the foundation for understanding how -containers, pods, and services within Kubernetes communicate with each other. This guide explains the key concepts and -how they fit together. - -In this guide you will learn: - -- The fundamental network behaviors the Kubernetes network model defines. -- How Kubernetes works with a variety of different network implementations. -- What Kubernetes Services are. -- How DNS works within Kubernetes. -- What "NAT outgoing" is and when you would want to use it. -- What "dual stack" is. - -## The Kubernetes network model - -The Kubernetes network model specifies: - -- Every pod gets its own IP address -- Containers within a pod share the pod IP address and can communicate freely with each other -- Pods can communicate with all other pods in the cluster using pod IP addresses (without - [NAT](about-networking.mdx#nat)) -- Isolation (restricting what each pod can communicate with) is defined using network policies - -As a result, pods can be treated much like VMs or hosts (they all have unique IP addresses), and the containers within -pods very much like processes running within a VM or host (they run in the same network namespace and share an IP -address). This model makes it easier for applications to be migrated from VMs and hosts to pods managed by Kubernetes. -In addition, because isolation is defined using network policies rather than the structure of the network, the network -remains simple to understand. This style of network is sometimes referred to as a "flat network". - -Note that, although very rarely needed, Kubernetes does also support the ability to map host ports through to pods, or -to run pods directly within the host network namespace sharing the host's IP address. - -## Kubernetes network implementations - -Kubernetes' built-in network support, kubenet, can provide some basic network connectivity. However, it is more common to -use third-party network implementations which plug into Kubernetes using the CNI (Container Network Interface) API. - -There are lots of different kinds of CNI plugins, but the two main ones are: - -- network plugins, which are responsible for connecting pods to the network -- IPAM (IP Address Management) plugins, which are responsible for allocating pod IP addresses.
- -{{prodname}} provides both network and IPAM plugins, but can also integrate and work seamlessly with some other CNI -plugins, including AWS, Azure, and Google network CNI plugins, and the host local IPAM plugin. This flexibility allows -you to choose the best networking options for your specific needs and deployment environment. You can read more about -this in the {{prodname}} [determine best networking option](../networking/determine-best-networking.mdx) -guide. - -## Kubernetes Services - -Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) provide a way of abstracting access to a group of pods as a network service. -The group of pods is usually defined using a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels). -Within the cluster the network service is usually represented as a virtual IP address, and kube-proxy load balances connections to the virtual IP across the group of pods backing the service. -The virtual IP is discoverable through Kubernetes DNS. -The DNS name and virtual IP address remain constant for the lifetime of the service, even though the pods backing the service may be created or destroyed, and the number of pods backing the service may change over time. - -Kubernetes Services can also define how a service is accessed from outside of the cluster, for example using - -- a node port, where the service can be accessed via a specific port on every node -- or a load balancer, where a network load balancer provides a virtual IP address through which the service can be accessed - from outside the cluster. - -Note that when using {{prodname}} in on-prem deployments you can also [advertise service IP addresses](../networking/configuring/advertise-service-ips.mdx) -, allowing services to be conveniently accessed without -going via a node port or load balancer. - -## Kubernetes DNS - -Each Kubernetes cluster provides a DNS service. Every pod and every service is discoverable through the Kubernetes DNS -service. - -For example: - -- Service: `my-svc.my-namespace.svc.cluster-domain.example` -- Pod: `pod-ip-address.my-namespace.pod.cluster-domain.example` -- Pod created by a deployment exposed as a service: - `pod-ip-address.deployment-name.my-namespace.svc.cluster-domain.example`. - -The DNS service is implemented as a Kubernetes Service that maps to one or more DNS server pods (usually CoreDNS), that -are scheduled just like any other pod. Pods in the cluster are configured to use the DNS service, with a DNS search list -that includes the pod's own namespace and the cluster's default domain. - -This means that if there is a service named `foo` in Kubernetes namespace `bar`, then pods in the same namespace can -access the service as `foo`, and pods in other namespaces can access the service as `foo.bar`. - -Kubernetes supports a rich set of options for controlling DNS in different scenarios. You can read more about these in -the Kubernetes guide [DNS for Services and Pods](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/). - -## NAT outgoing - -The Kubernetes network model specifies that pods must be able to communicate with each other directly using pod IP -addresses. But it does not mandate that pod IP addresses are routable beyond the boundaries of the cluster. Many -Kubernetes network implementations use [overlay networks](about-networking.mdx#overlay-networks).
-Typically for these deployments, when a pod initiates a connection to an IP address outside of the cluster, the node -hosting the pod will SNAT (Source Network Address Translation) map the source address of the packet from the pod IP to -the node IP. This enables the connection to be routed across the rest of the network to the destination (because the -node IP is routable). Return packets on the connection are automatically mapped back by the node replacing the node IP -with the pod IP before forwarding the packet to the pod. - -When using {{prodname}}, depending on your environment, you can generally choose whether you prefer to run an -overlay network, or prefer to have fully routable pod IPs. You can read more about this in the {{prodname}} -[determine best networking option](../networking/determine-best-networking.mdx) guide. {{prodname}} also -allows you to [configure outgoing NAT](../networking/configuring/workloads-outside-cluster.mdx) for specific IP address -ranges if more granularity is desired. - -## Dual stack - -If you want to use a mix of IPv4 and IPv6 then you can enable Kubernetes [dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/) mode. When enabled, all -pods will be assigned both an IPv4 and IPv6 address, and Kubernetes Services can specify whether they should be exposed -as IPv4 or IPv6 addresses. - -## Additional resources - -- [The Kubernetes Network Model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#the-kubernetes-network-model) -- [Video: Everything you need to know about Kubernetes networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/) -- [Video: Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/) -- [Video: Everything you need to know about Kubernetes networking on Google Cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/) diff --git a/calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx b/calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx deleted file mode 100644 index 7ee249252a..0000000000 --- a/calico_versioned_docs/version-3.25/about/about-kubernetes-egress.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -description: Learn about Kubernetes egress! ---- - -# About Kubernetes egress - -:::note - -This guide provides optional background education, including -education that is not specific to {{prodname}}. - -::: - -In this guide you will learn: - -- What is Kubernetes egress? -- Why should you restrict egress traffic and how can you do it? -- What is "NAT outgoing" and when is it used? -- What is an egress gateway, and why might you want to use one? - -## What is Kubernetes egress? - -In this guide we are using the term Kubernetes egress to describe connections being made from pods to anything outside of the cluster. - -In contrast to ingress traffic, where Kubernetes has the [Ingress](about-kubernetes-ingress.mdx) -resource type to help manage the traffic, there is no Kubernetes Egress resource. Instead, how the egress traffic is -handled at a networking level is determined by the Kubernetes network implementation / CNI plugin being used by the -cluster. In addition, if a service mesh is being used, this can add egress behaviors on top of those the -network implementation provides. 
- -There are three areas of behavior worth understanding for egress traffic, so you can choose a networking and/or service -mesh setup that best suits your needs: - -- Restricting egress traffic -- Outgoing NAT behavior -- Egress gateways - -## Restricting egress traffic - -It's a common security requirement and best practice to restrict outgoing connections from the cluster. This is normally -achieved using [Network Policy](about-network-policy.mdx) to define egress rules for each -microservice, often in conjunction with a [default deny](about-network-policy.mdx#default-deny) -policy that ensures outgoing connections are denied by default, until a policy is defined to explicitly allow specific -traffic. - -One limitation when using Kubernetes Network Policy to restrict access to specific external resources is that the external -resources need to be specified as IP addresses (or IP address ranges) within the policy rules. If the IP addresses -associated with an external resource change, then every policy that referenced those IP addresses needs to be updated with -the new IP addresses. This limitation can be circumvented using Calico [Network Sets](../network-policy/policy-rules/external-ips-policy.mdx) -, or Calico Enterprise's support for domain names in policy rules. - -In addition to using network policy, service meshes typically allow you to configure which external services each pod -can access. In the case of Istio, {{prodname}} can be integrated to enforce network policy at the service mesh -layer, including [L5-7 rules](../network-policy/istio/http-methods.mdx), as another alternative to using IP addresses in rules. To -learn more about the benefits of this kind of approach, read our [Adopt a zero trust network model for security ](../network-policy/adopt-zero-trust.mdx) - guide. - -Note that, in addition to everything mentioned so far, perimeter firewalls can also be used to restrict outgoing connections, -for example to allow connections only to particular external IP address ranges, or external services. However, since -perimeter firewalls typically cannot distinguish individual pods, the rules apply equally to all pods in the cluster. -This provides some defense in depth, but cannot replace the requirement for network policy. - -## NAT outgoing - -Network Address Translation ([NAT](https://en.wikipedia.org/wiki/Network_address_translation)) is the process of mapping an IP address in a packet -to a different IP address as the packet passes through the device performing the NAT. Depending on the use case, NAT can -apply to the source or destination IP address, or to both addresses. - -In the context of Kubernetes egress, NAT is used to allow pods to connect to services outside of the cluster if the pods -have IP addresses that are not routable outside of the cluster (for example, if the pod network is an overlay). - -For example, if a pod in an overlay network attempts to connect to an IP address outside of the cluster, then the -node hosting the pod uses SNAT (Source Network Address Translation) to map the non-routable source IP address of the -packet to the node's IP address before forwarding on the packet. The node then maps response packets coming in the -opposite direction back to the original pod IP address, so packets flow end-to-end in both directions, with neither the -pod nor the external service being aware that the mapping is happening. - -In most clusters this NAT behavior is configured statically across the whole of the cluster.
When using -{{prodname}}, the NAT behavior can be configured at a more granular level for particular address ranges using [IP pools](../reference/resources/ippool.mdx) -. This effectively allows the scope of "non-routable" to be more -tightly defined than just "inside the cluster vs outside the cluster", which can be useful in some enterprise deployment -scenarios. - -## Egress gateways - -Another approach to Kubernetes egress is to route all outbound connections via one or more egress gateways. The gateways -SNAT (Source Network Address Translation) the connections so the external service being connected to sees the connection -as coming from the egress gateway. The main use case is to improve security, either with the egress gateway performing a -direct security role in terms of what connections it allows, or in conjunction with perimeter firewalls (or other -external entities). For example, so that perimeter firewalls see the connections coming from well known IP -addresses (the egress gateways) rather than from dynamic pod IP addresses they don't understand. - -Egress gateways are not a native concept in Kubernetes itself, but are implemented by some Kubernetes network -implementations and some service meshes. For example, Calico Enterprise provides egress gateway functionality, plus the -ability to map namespaces (or even individual pods) to specific egress gateways. Perimeter firewalls (or other external -security entities) can then effectively provide per namespace security controls, even though they do not have visibility -to dynamic pod IP addresses. - -As an alternative approach to egress gateways, {{prodname}} allows you to control pod IP address ranges based on -namespace, or node, or even at the individual pod level. Assuming no outgoing NAT is required, this provides a very -simple way for perimeter firewalls (or other external security entities) to integrate with Kubernetes for both ingress -and egress traffic. (Note that this approach relies on having enough address space available to sensibly assign IP -address ranges, for example to each namespace, so it can lead to IP address range exhaustion challenges for large scale -deployments. In these scenarios, using egress gateways is likely to be a better option.) - -## Additional resources - -- [Adopt a zero trust network model for security](../network-policy/adopt-zero-trust.mdx) -- [Use external IPs or networks rules in policy](../network-policy/policy-rules/external-ips-policy.mdx) -- [Enforce network policy using Istio](../network-policy/istio/app-layer-policy.mdx) -- [Use HTTP methods and paths in policy rules](../network-policy/istio/http-methods.mdx) -- [Restrict a pod to use an IP address in a specific range](../networking/ipam/legacy-firewalls.mdx) -- [Assign IP addresses based on topology](../networking/ipam/assign-ip-addresses-topology.mdx) \ No newline at end of file diff --git a/calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx b/calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx deleted file mode 100644 index 2ea2ea7e62..0000000000 --- a/calico_versioned_docs/version-3.25/about/about-kubernetes-ingress.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -description: Learn about Kubernetes Ingress! ---- - -# About Kubernetes Ingress - -:::note - -This guide provides optional background education, including -education that is not specific to {{prodname}}. - -::: - -In this guide you will learn: - -- What is Kubernetes Ingress? -- Why use ingress? 
-- What are the differences between different ingress implementations? -- How do ingress and network policy interact? -- How do ingress and services fit together under the covers? - -## What is Kubernetes Ingress? - -Kubernetes Ingress builds on top of Kubernetes [Services](about-kubernetes-services.mdx) to provide -load balancing at the application layer, mapping HTTP and HTTPS requests with particular domains or URLs to Kubernetes -services. Ingress can also be used to terminate SSL / TLS before load balancing to the service. - -The details of how Ingress is implemented depend on which [Ingress Controller](https://kubernetes.io/docs/concepts/services-networking/ingress-controllers/) you are using. The Ingress -Controller is responsible for monitoring Kubernetes [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) resources and provisioning / configuring one -or more ingress load balancers to implement the desired load balancing behavior. - -Unlike Kubernetes services, which are handled at the network layer (L3-4), ingress load balancers operate at the -application layer (L5-7). Incoming connections are terminated at the load balancer so it can inspect the individual HTTP / -HTTPS requests. The requests are then forwarded via separate connections from the load balancer to the chosen service -backing pods. As a result, network policy applied to the backing pods can restrict access to only allow connections from the load -balancer, but cannot restrict access to specific original clients. - -## Why use Kubernetes Ingress? - -Given that Kubernetes [Services](about-kubernetes-services.mdx) already provide a mechanism for load -balancing access to services from outside of the cluster, why might you want to use Kubernetes Ingress? - -The mainline use case is if you have multiple HTTP / HTTPS services that you want to expose through a single external IP -address, perhaps with each service having a different URL path, or perhaps as multiple different domains. This is a lot -simpler from a client configuration point of view than exposing each service outside of the cluster using Kubernetes -Services, which would give each service a separate external IP address. - -If, on the other hand, your application architecture is fronted by a single "front end" microservice then Kubernetes -Services likely already meet your needs. In this case you might prefer to not add Ingress to the picture, both from a -simplicity point of view, and potentially also so you can more easily restrict access to specific clients using network -policy. In effect, your "front end" microservice already plays the role of Kubernetes Ingress, in a way that is not that -dissimilar to [in-cluster ingress](#in-cluster-ingress-solutions) solutions discussed below. - -## Types of Ingress solutions - -Broadly speaking there are two types of ingress solutions: - -- In-cluster ingress - where ingress load balancing is performed by pods within the cluster itself. -- External ingress - where ingress load balancing is implemented outside of the cluster by - appliances or cloud provider capabilities. - -### In-cluster ingress solutions - -In-cluster ingress solutions use software load balancers running in pods within the cluster itself. There are many -different ingress controllers to consider that follow this pattern, including for example the NGINX ingress controller.
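Whichever controller you choose, the Ingress resource itself is standard Kubernetes. The following minimal sketch shows the shape of the API, mapping one host and URL path to a backing service; the hostname, path, and service name are purely illustrative.

```yaml
# Illustrative only: a minimal Ingress mapping one host/path to a service.
# The host, path, and Service name below are hypothetical.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: storefront # hypothetical
spec:
  rules:
    - host: shop.example.com # hypothetical domain
      http:
        paths:
          - path: /cart
            pathType: Prefix
            backend:
              service:
                name: cart # hypothetical Service in the same namespace
                port:
                  number: 80
```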
- -The advantages of this approach are that you can: - -- horizontally scale your ingress solution up to the limits of Kubernetes -- choose the ingress controller that best suits your specific needs, for example, with particular load balancing - algorithms, or security options. - -To get your ingress traffic to the in-cluster ingress pods, the ingress pods are normally exposed externally as a -Kubernetes service, so you can use any of the standard ways of accessing the service from outside of the cluster. A -common approach is to use an external network load balancer or service IP advertisement, with `externalTrafficPolicy:local`. -This minimizes the number of network hops, and retains the client source IP address, which allows network policy to be used -to restrict access to the ingress pods to particular clients if desired. - -![In-cluster ingress](/img/calico/ingress-in-cluster.svg) - -### External ingress solutions - -External ingress solutions use application load balancers outside of the cluster. The exact details and -features depend on which ingress controller you are using, but most cloud providers include an ingress controller that -automates the provisioning and management of the cloud provider's application load balancers to provide ingress. - -The advantage of this type of ingress solution is that your cloud provider handles the operational complexity of the -ingress for you. The downsides are a potentially more limited set of features compared to the rich range of in-cluster -ingress solutions, and the maximum number of services exposed by ingress being constrained by cloud provider specific -limits. - -![External ingress](/img/calico/ingres-external.svg) - -Note that most application load balancers support a basic mode of operation of forwarding traffic to the chosen service -backing pods via the [node port](about-kubernetes-services.mdx#node-port-services) of the -corresponding service. - -In addition to this basic approach of load balancing to service node ports, some cloud providers support a second mode -of application layer load balancing, which load balances directly to the pods backing each service, without going via -node-ports or other kube-proxy service handling. This has the advantage of eliminating the potential second network hop -associated with node ports load balancing to a pod on a different node. The potential disadvantage is that if you are -operating at very high scales, for example with hundreds of pods backing a service, you may exceed the application layer -load balancer's maximum limit of IPs it can load balance to in this mode. In this case switching to an in-cluster ingress -solution is likely the better fit for you.
- -::: - -**In-cluster ingress solution exposed as service type `LoadBalancer` with `externalTrafficPolicy:local`** - -![In-cluster ingress with NLB local](/img/calico/ingress-in-cluster-nlb-local.svg) - -**External ingress solution via node ports** - -![External ingress via node port](/img/calico/ingress-external-node-ports.svg) - -**External ingress solution direct to pods** - -![External ingress direct to pods](/img/calico/ingress-external-direct-to-pods.svg) - -## Additional resources - -- [Video: Everything you need to know about Kubernetes Ingress networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-ingress-networking/) -- [Video: Everything you need to know about Kubernetes Services networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/) diff --git a/calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx b/calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx deleted file mode 100644 index 7b54f0fc9e..0000000000 --- a/calico_versioned_docs/version-3.25/about/about-kubernetes-services.mdx +++ /dev/null @@ -1,142 +0,0 @@ ---- -description: Learn about Kubernetes services! ---- - -# About Kubernetes Services - -:::note - -This guide provides optional background education, including -education that is not specific to {{prodname}}. - -::: - -In this guide you will learn: - -- What are Kubernetes Services? -- What are the differences between the three main service types and what do you use them for? -- How do services and network policy interact? -- Some options for optimizing how services are handled. - -## What are Kubernetes Services? - -Kubernetes [Services](https://kubernetes.io/docs/concepts/services-networking/service/) provide a way of abstracting access to a group -of pods as a network service. The group of pods backing each service is usually defined using a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels). - -When a client connects to a Kubernetes service, the connection is load balanced to one of the pods backing the service, -as illustrated in this conceptual diagram: - -![Kubernetes Service conceptual diagram](/img/calico/k8s-service-concept.svg) - -There are three main types of Kubernetes services: - -- Cluster IP - which is the usual way of accessing a service from inside the cluster -- Node port - which is the most basic way of accessing a service from outside the cluster -- Load balancer - which uses an external load balancer as a more sophisticated way to access a service from outside the - cluster. - -## Cluster IP services - -The default service type is `ClusterIP`. This allows a service to be accessed within the cluster via a virtual IP -address, known as the service Cluster IP. The Cluster IP for a service is discoverable through Kubernetes DNS. For -example, `my-svc.my-namespace.svc.cluster-domain.example`. The DNS name and Cluster IP address remain constant for the -lifetime of the service, even though the pods backing the service may be created or destroyed, and the number of pods -backing the service may change over time. - -In a typical Kubernetes deployment, kube-proxy runs on every node and is responsible for intercepting connections to -Cluster IP addresses and load balancing across the group of pods backing each service. As part of this process -[DNAT](about-networking.mdx#nat) is used to map the destination IP address from the Cluster IP to the -chosen backing pod.
Response packets on the connection then have the NAT reversed on their way back to the pod that -initiated the connection. - -![kube-proxy cluster IP](/img/calico/kube-proxy-cluster-ip.svg) - -Importantly, network policy is enforced based on the pods, not the service Cluster IP. (i.e. Egress network policy is -enforced for the client pod after the DNAT has changed the connection's destination IP to the chosen service backing -pod. And because only the destination IP for the connection is changed, ingress network policy for the backing pod sees the -original client pod as the source of the connection.) - -## Node port services - -The most basic way to access a service from outside the cluster is to use a service of type `NodePort`. A Node Port is a -port reserved on each node in the cluster through which the service can be accessed. In a typical Kubernetes deployment, -kube-proxy is responsible for intercepting connections to Node Ports and load balancing them across the pods backing -each service. - -As part of this process [NAT](about-networking.mdx#nat) is used to map the destination IP address and -port from the node IP and Node Port, to the chosen backing pod and service port. In addition the source IP address is -mapped from the client IP to the node IP, so that response packets on the connection flow back via the original node, -where the NAT can be reversed. (It's the node which performed the NAT that has the connection tracking state needed to -reverse the NAT.) - -![kube-proxy node port](/img/calico/kube-proxy-node-port.svg) - -Note that because the connection source IP address is SNATed to the node IP address, ingress network policy for the -service backing pod does not see the original client IP address. Typically this means that any such policy is limited to -restricting the destination protocol and port, and cannot restrict based on the client / source IP. This limitation can -be circumvented in some scenarios by using [externalTrafficPolicy](#externaltrafficpolicylocal) or by using -{{prodname}}'s eBPF dataplane [native service handling](#calico-ebpf-native-service-handling) (rather than kube-proxy) which preserves source IP address. - -## Load balancer services - -Services of type `LoadBalancer` expose the service via an external network load balancer (NLB). The exact type of -network load balancer depends on which public cloud provider or, if on-prem, which specific hardware load balancer is -integrated with your cluster. - -The service can be accessed from outside of the cluster via a specific IP address on the network load balancer, which by -default will load balance evenly across the nodes using the service node port. - -![kube-proxy load balancer](/img/calico/kube-proxy-load-balancer.svg) - -Most network load balancers preserve the client source IP address, but because the service then goes via a node port, -the backing pods themselves do not see the client IP, with the same implications for network policy. As with node -ports, this limitation can be circumvented in some scenarios by using [externalTrafficPolicy](#externaltrafficpolicylocal) -or by using {{prodname}}'s eBPF dataplane [native service handling](#calico-ebpf-native-service-handling) (rather -than kube-proxy) which preserves source IP address. - -## Advertising service IPs - -One alternative to using node ports or network load balancers is to advertise service IP addresses over BGP.
This -requires the cluster to be running on an underlying network that supports BGP, which typically means an on-prem -deployment with standard Top of Rack routers. - -{{prodname}} supports advertising service Cluster IPs, or External IPs for services configured with one. If you are -not using Calico as your network plugin then [MetalLB](https://github.com/metallb/metallb) provides similar capabilities that work with a variety of different network -plugins. - -![kube-proxy service advertisement](/img/calico/kube-proxy-service-advertisement.svg) - -## externalTrafficPolicy:local - -By default, whether using service type `NodePort` or `LoadBalancer` or advertising service IP addresses over BGP, -accessing a service from outside the cluster load balances evenly across all the pods backing the service, independent -of which node the pods are on. This behavior can be changed by configuring the service with -`externalTrafficPolicy:local` which specifies that connections should only be load balanced to pods backing the service -on the local node. - -When combined with services of type `LoadBalancer` or with {{prodname}} service IP address advertising, traffic is -only directed to nodes that host at least one pod backing the service. This reduces the potential extra network hop -between nodes, and, perhaps more importantly, maintains the source IP address all the way to the pod, so network policy -can restrict specific external clients if desired. - -![kube-proxy service advertisement](/img/calico/kube-proxy-service-local.svg) - -Note that in the case of services of type `LoadBalancer`, not all load balancers support this mode. And in the case of -service IP advertisement, the evenness of the load balancing becomes topology dependent. In this case, pod anti-affinity -rules can be used to ensure even distribution of backing pods across your topology, but this does add some complexity to -deploying the service. - -## Calico eBPF native service handling - -As an alternative to using Kubernetes standard kube-proxy, {{prodname}}'s [eBPF dataplane](../operations/ebpf/enabling-ebpf.mdx) - supports native service handling. This preserves source IP to -simplify network policy, offers DSR (Direct Server Return) to reduce the number of network hops for return traffic, and -provides even load balancing independent of topology, with reduced CPU and latency compared to kube-proxy. - -![kube-proxy service advertisement](/img/calico/calico-native-service-handling.svg) - -## Additional resources - -- [Video: Everything you need to know about Kubernetes Services networking ](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/) -- [Blog: Introducing the Calico eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/) -- [Blog: Hands on with Calico eBPF native service handling](https://www.projectcalico.org/hands-on-with-calicos-ebpf-service-handling/) diff --git a/calico_versioned_docs/version-3.25/about/about-network-policy.mdx b/calico_versioned_docs/version-3.25/about/about-network-policy.mdx deleted file mode 100644 index 78696632db..0000000000 --- a/calico_versioned_docs/version-3.25/about/about-network-policy.mdx +++ /dev/null @@ -1,244 +0,0 @@ ---- -description: Learn about network policy! ---- - -# About Network Policy - -:::note - -This guide provides optional background education, including -education that is not specific to {{prodname}}.
## Calico eBPF native service handling

As an alternative to using Kubernetes' standard kube-proxy, {{prodname}}'s [eBPF dataplane](../operations/ebpf/enabling-ebpf.mdx) supports native service handling. This preserves the source IP to simplify network policy, offers DSR (Direct Server Return) to reduce the number of network hops for return traffic, and provides even load balancing independent of topology, with reduced CPU and latency compared to kube-proxy.

![calico native service handling](/img/calico/calico-native-service-handling.svg)

## Additional resources

- [Video: Everything you need to know about Kubernetes Services networking](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-services-networking/)
- [Blog: Introducing the Calico eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/)
- [Blog: Hands on with Calico eBPF native service handling](https://www.projectcalico.org/hands-on-with-calicos-ebpf-service-handling/)

diff --git a/calico_versioned_docs/version-3.25/about/about-network-policy.mdx b/calico_versioned_docs/version-3.25/about/about-network-policy.mdx
deleted file mode 100644
index 78696632db..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-network-policy.mdx
+++ /dev/null
@@ -1,244 +0,0 @@
---
description: Learn about network policy!
---

# About Network Policy

:::note

This guide provides optional background education, including education that is not specific to {{prodname}}.

:::

Kubernetes and {{prodname}} provide network policy APIs to help you secure your workloads.

In this guide you will learn:

- What network policy is and why it is important.
- The differences between Kubernetes and Calico network policies and when you might want to use each.
- Some best practices for using network policy.

## What is network policy?

Network policy is the primary tool for securing a Kubernetes network. It allows you to easily restrict the network traffic in your cluster so only the traffic that you want to flow is allowed.

To understand the significance of network policy, let's briefly explore how network security was typically achieved prior to network policy. Historically in enterprise networks, network security was provided by designing a physical topology of network devices (switches, routers, firewalls) and their associated configuration. The physical topology defined the security boundaries of the network. In the first phase of virtualization, the same network and network device constructs were virtualized in the cloud, and the same techniques for creating specific network topologies of (virtual) network devices were used to provide network security. Adding new applications or services often required additional network design to update the network topology and network device configuration to provide the desired security.

In contrast, the [Kubernetes network model](about-k8s-networking.mdx) defines a "flat" network in which every pod can communicate with all other pods in the cluster using pod IP addresses. This approach massively simplifies network design and allows new workloads to be scheduled dynamically anywhere in the cluster with no dependencies on the network design.

In this model, rather than network security being defined by network topology boundaries, it is defined using network policies that are independent of the network topology. Network policies are further abstracted from the network by using label selectors as their primary mechanism for defining which workloads can talk to which workloads, rather than IP addresses or IP address ranges.

## Why is network policy important?

In an age where attackers are becoming more and more sophisticated, network security as a line of defense is more important than ever.

While you can (and should) use firewalls to restrict traffic at the perimeters of your network (commonly referred to as north-south traffic), their ability to police Kubernetes traffic is often limited to a granularity of the cluster as a whole, rather than specific groups of pods, due to the dynamic nature of pod scheduling and pod IP addresses. In addition, the goal of most attackers once they gain a small foothold inside the perimeter is to move laterally (commonly referred to as east-west) to gain access to higher value targets, which perimeter-based firewalls can't police against.

Network policy, on the other hand, is designed for the dynamic nature of Kubernetes by following the standard Kubernetes paradigm of using label selectors to define groups of pods, rather than IP addresses. And because network policy is enforced within the cluster itself, it can police both north-south and east-west traffic.
Network policy represents an important evolution of network security, not just because it handles the dynamic nature of modern microservices, but because it empowers dev and devops engineers to easily define network security themselves, rather than needing to learn low-level networking details or raise tickets with a separate team responsible for managing firewalls. Network policy makes it easy to define intent, such as "only this microservice gets to connect to the database", write that intent as code (typically in YAML files), and integrate authoring of network policies into git workflows and CI/CD processes.

:::note

Calico and Calico Enterprise offer capabilities that can help perimeter firewalls integrate more tightly with Kubernetes. However, this does not remove the need or value of network policies within the cluster itself.

:::

## Kubernetes network policy

Kubernetes network policies are defined using the Kubernetes [NetworkPolicy](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/) resource.

The main features of Kubernetes network policies are:

- Policies are namespace scoped (i.e. you create them within the context of a specific namespace just like, for example, pods)
- Policies are applied to pods using label selectors
- Policy rules can specify the traffic that is allowed to/from other pods, namespaces, or CIDRs
- Policy rules can specify protocols (TCP, UDP, SCTP), named ports or port numbers

Kubernetes itself does not enforce network policies, and instead delegates their enforcement to network plugins. Most network plugins implement the mainline elements of Kubernetes network policies, though not all implement every feature of the specification. (Calico does implement every feature, and was the original reference implementation of Kubernetes network policies.)

To learn more about Kubernetes network policies, read the [Get started with Kubernetes network policy](../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx) guide.

## Calico network policy

In addition to enforcing Kubernetes network policy, {{prodname}} supports its own namespaced [NetworkPolicy](../reference/resources/networkpolicy.mdx) and non-namespaced [GlobalNetworkPolicy](../reference/resources/globalnetworkpolicy.mdx) resources, which provide additional features beyond those supported by Kubernetes network policy. This includes support for:

- policy ordering/priority
- deny and log actions in rules
- more flexible match criteria for applying policies and in policy rules, including matching on Kubernetes ServiceAccounts, and (if using Istio & Envoy) cryptographic identity and layer 5-7 match criteria such as HTTP & gRPC URLs
- the ability to reference non-Kubernetes workloads in policies, including matching on [NetworkSets](../reference/resources/networkset.mdx) in policy rules

While Kubernetes network policy applies only to pods, Calico network policy can be applied to multiple types of endpoints including pods, VMs, and host interfaces.

To learn more about Calico network policies, read the [Get started with Calico network policy](../network-policy/get-started/calico-policy/calico-network-policy.mdx) guide.

## Benefits of using {{prodname}} for network policy

### Full Kubernetes network policy support

Unlike some other network policy implementations, Calico implements the full set of Kubernetes network policy features.

### Richer network policy

Calico network policies allow even richer traffic control than Kubernetes network policies if you need it. In addition, Calico network policies allow you to create policy that applies across multiple namespaces using GlobalNetworkPolicy resources.
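As a brief illustration of those richer features, here is a hedged sketch of a namespaced {{prodname}} policy — the policy name, selector, and port are hypothetical — that uses explicit ordering plus log and deny actions, none of which Kubernetes network policy can express:

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: database-lockdown     # hypothetical policy name
  namespace: staging
spec:
  order: 100                  # lower order values are enforced first
  selector: app == 'database' # pods this policy applies to
  types:
    - Ingress
  ingress:
    # Explicitly allow only the back-end pods to reach the database port.
    - action: Allow
      protocol: TCP
      source:
        selector: app == 'back-end'
      destination:
        ports:
          - 27017
    # Log, then deny, everything else.
    - action: Log
    - action: Deny
```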
### Mix Kubernetes and Calico network policy

Kubernetes and Calico network policies can be mixed together seamlessly. One common use case for this is to split responsibilities between security / cluster ops teams and developer / service teams. For example, giving the security / cluster ops team RBAC permissions to define Calico policies, and giving developer / service teams RBAC permissions to define Kubernetes network policies in their specific namespaces. As Calico policy rules can be ordered to be enforced either before or after Kubernetes network policies, and can include actions such as deny and log, this allows the security / cluster ops team to define basic, higher-level, more general-purpose rules, while empowering the developer / service teams to define their own fine-grained constraints on the apps and services they are responsible for.

For more flexible control and delegation of responsibilities between two or more teams, Calico Enterprise extends this model to provide [hierarchical policy](#hierarchical-policy).

![Example mix of network policy types](/img/calico/example-k8s-calico-policy-mix.svg)

### Ability to protect hosts and VMs

As {{prodname}} policies can be enforced on host interfaces, you can use them to protect your Kubernetes nodes (not just your pods), including, for example, limiting access to node ports from outside of the cluster. To learn more, check out the {{prodname}} [policy for hosts](../network-policy/hosts/index.mdx) guides.

### Integrates with Istio

When used with the Istio service mesh, the {{prodname}} policy engine enforces the same policy model at the host networking layer and at the service mesh layer, protecting your infrastructure from compromised workloads and protecting your workloads from compromised infrastructure. This also avoids the need for dual provisioning of security at the service mesh and infrastructure layers, or having to learn different policy models for each layer.

### Extendable with Calico Enterprise

Calico Enterprise adds even richer network policy capabilities, including hierarchical policies, with each team having its own boundaries of trust, and FQDN / domain names in policy rules for restricting access to specific external services.

## Best practices for network policies

### Ingress and egress

At a minimum we recommend that every pod is protected by network policy ingress rules that restrict what is allowed to connect to the pod and on which ports. The best practice is also to define network policy egress rules that restrict the outgoing connections that are allowed by pods themselves. Ingress rules protect your pod from attacks outside of the pod. Egress rules help protect everything outside of the pod if the pod gets compromised, reducing the attack surface by making it harder for an attacker to move laterally (east-west) or to exfiltrate compromised data from your cluster (north-south).

### Policy schemas

Due to the flexibility of network policy and labelling, there are often multiple different ways of labelling and writing policies that can achieve the same goal.
One of the most common approaches is to have a small number of global policies that apply to all pods, and then a single pod-specific policy that defines all the ingress and egress rules that are particular to that pod.

For example:

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: back-end
  namespace: staging
spec:
  podSelector:
    matchLabels:
      app: back-end
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: front-end
      ports:
        - protocol: TCP
          port: 443
  egress:
    - to:
        - podSelector:
            matchLabels:
              app: database
      ports:
        - protocol: TCP
          port: 27017
```

### Default deny

One approach to ensuring these best practices are being followed is to define [default deny](../network-policy/get-started/kubernetes-default-deny.mdx) network policies. These ensure that if no other policy is defined that explicitly allows traffic to/from a pod, then the traffic will be denied. As a result, anytime a team deploys a new pod, they are forced to also define network policy for the pod. It can be useful to use a {{prodname}} GlobalNetworkPolicy for this (rather than needing to define a policy every time a new namespace is created) and to include some exceptions to the default deny (for example to allow pods to access DNS).

For example, you might use the following policy to default-deny all (non-system) pod traffic except for DNS queries to kube-dns/core-dns.

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: default-app-policy
spec:
  namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system", "calico-apiserver"}
  types:
    - Ingress
    - Egress
  egress:
    - action: Allow
      protocol: UDP
      destination:
        selector: k8s-app == "kube-dns"
        ports:
          - 53
```

### Hierarchical policy

[Calico Enterprise](/calico-enterprise/latest/network-policy/policy-tiers/tiered-policy) supports hierarchical network policy using policy tiers. RBAC for each tier can be defined to restrict who can interact with each tier. This can be used to delegate trust across multiple teams.

![Example tiers](/img/calico/example-tiers.svg)

diff --git a/calico_versioned_docs/version-3.25/about/about-networking.mdx b/calico_versioned_docs/version-3.25/about/about-networking.mdx
deleted file mode 100644
index 0508e962f3..0000000000
--- a/calico_versioned_docs/version-3.25/about/about-networking.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
---
description: Learn about networking!
---

# About Networking

:::note

This guide provides optional background education, not specific to {{prodname}}.

:::

You can get up and running with Calico by following any of the {{prodname}} [install guides](../getting-started/index.mdx) without needing to be a networking expert. Calico hides the complexities for you. However, if you would like to learn more about networking so you can better understand what is happening under the covers, this guide provides a short introduction to some of the key fundamental networking concepts for anyone who is not already familiar with them.

In this guide you will learn:

- The terms used to describe different layers of the network.
- The anatomy of a network packet.
- What MTU is and why it makes a difference.
- How IP addressing, subnets, and IP routing work.
- What an overlay network is.
- What DNS and NAT are.
## Network layers

The process of sending and receiving data over a network is commonly categorized into 7 layers (referred to as the [OSI model](https://en.wikipedia.org/wiki/OSI_model)). The layers are typically abbreviated as L1 - L7. You can think of data as passing through each of these layers in turn as it is sent or received from an application, with each layer being responsible for a particular part of the processing required to send or receive the data over the network.

![OSI network layers diagram](/img/calico/osi-network-layers.svg)

In a modern enterprise or public cloud network, the layers commonly map as follows:

- L5-7: all the protocols most application developers are familiar with, e.g. HTTP, FTP, SSH, SSL, DNS.
- L4: TCP or UDP, including source and destination ports.
- L3: IP packets and IP routing.
- L2: Ethernet packets and Ethernet switching.

## Anatomy of a network packet

When sending data over the network, each layer in the network stack adds its own header containing the control/metadata the layer needs to process the packet as it traverses the network, passing the resulting packet on to the next layer of the stack. In this way the complete packet is produced, which includes all the control/metadata required by every layer of the stack, without any layer understanding the data or needing to process the control/metadata of adjacent network layers.

![Anatomy of a network packet](/img/calico/anatomy-of-a-packet.svg)

## IP addressing, subnets and IP routing

The L3 network layer introduces IP addresses and typically marks the boundary between the part of networking that application developers care about, and the part of networking that network engineers care about. In particular, application developers typically regard IP addresses as the source and destination of the network traffic, but have much less of a need to understand L3 routing or anything lower in the network stack, which is more the domain of network engineers.

There are two variants of IP addresses: IPv4 and IPv6.

- IPv4 addresses are 32 bits long and are the most commonly used. They are typically represented as 4 bytes in decimal (each 0-255) separated by dots, e.g. `192.168.27.64`. Several ranges of IP addresses are reserved as "private"; these can only be used within local private networks, are not routable across the internet, and can be reused by enterprises as often as they want to. In contrast, "public" IP addresses are globally unique across the whole of the internet. As the number of network devices and networks connected to the internet has grown, public IPv4 addresses are now in short supply.
- IPv6 addresses are 128 bits long and designed to overcome the shortage of IPv4 address space. They are typically represented by 8 groups of 4-digit hexadecimal numbers, e.g. `1203:8fe0:fe80:b897:8990:8a7c:99bf:323d`. Due to the 128-bit length, there's no shortage of IPv6 addresses. However, many enterprises have been slow to adopt IPv6, so for now at least, IPv4 remains the default for many enterprise and data center networks.

Groups of IP addresses are typically represented using [CIDR notation](https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing), which consists of an IP address and the number of significant bits of the IP address separated by a `/`. For example, `192.168.27.0/24` represents the group of 256 IP addresses from `192.168.27.0` to `192.168.27.255`.
A group of IP addresses within a single L2 network is referred to as a subnet. Within a subnet, packets can be sent between any pair of devices as a single network hop, based solely on the L2 header (and footer).

To send packets beyond a single subnet requires L3 routing, with each L3 network device (router) being responsible for making decisions on the path to send the packet based on L3 routing rules. Each network device acting as a router has routes that determine where a packet for a particular CIDR should be sent next. So for example, in a Linux system, a route of `10.48.0.128/26 via 10.0.0.12 dev eth0` indicates that packets with a destination IP address in `10.48.0.128/26` should be routed to a next network hop of `10.0.0.12` over the `eth0` interface.

Routes can be configured statically by an administrator, or programmed dynamically using routing protocols. When using routing protocols, each network device typically needs to be configured to tell it which other network devices it should be exchanging routes with. The routing protocol then handles programming the right routes across the whole of the network as devices are added or removed, or network links come in or out of service.

One common routing protocol used in large enterprise and data center networks is [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol). BGP is one of the main protocols that powers the internet, so it scales incredibly well and is very widely supported by modern routers.

## Overlay networks

An overlay network allows network devices to communicate across an underlying network (referred to as the underlay) without the underlay network having any knowledge of the devices connected to the overlay network. From the point of view of the devices connected to the overlay network, it looks just like a normal network. There are many different kinds of overlay networks that use different protocols to make this happen, but in general they share the same common characteristic of taking a network packet, referred to as the inner packet, and encapsulating it inside an outer network packet. In this way the underlay sees the outer packets without needing to understand how to handle the inner packets.

How the overlay knows where to send packets varies by overlay type and the protocols they use. Similarly, exactly how the packet is wrapped varies between different overlay types. In the case of VXLAN, for example, the inner packet is wrapped and sent as UDP in the outer packet.

![Anatomy of an overlay network packet](/img/calico/anatomy-of-an-overlay-packet.svg)

Overlay networks have the advantage of having minimal dependencies on the underlying network infrastructure, but have the downsides of:

- having a small performance impact compared to non-overlay networking, which you might want to avoid if running network-intensive workloads
- workloads on the overlay are not easily addressable from the rest of the network, so NAT gateways or load balancers are required to bridge between the overlay and the underlay network for any ingress to, or egress from, the overlay.

{{prodname}} networking options are exceptionally flexible, so in general you can choose whether you prefer {{prodname}} to provide an overlay network, or a non-overlay network. You can read more about this in the {{prodname}} [determine best networking option](../networking/determine-best-networking.mdx) guide.
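To make this concrete in {{prodname}} terms, whether pods use an overlay is selected per IP pool. The following is a minimal sketch, where the pool name and CIDR are illustrative assumptions:

```yaml
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: default-ipv4-ippool   # illustrative pool name
spec:
  cidr: 192.168.0.0/16        # illustrative pod CIDR
  vxlanMode: Always           # encapsulate all pod-to-pod traffic in VXLAN
  natOutgoing: true           # SNAT pod traffic leaving the overlay
```

`vxlanMode: CrossSubnet` is a middle ground that only encapsulates traffic crossing an L2 subnet boundary, avoiding overlay overhead between nodes on the same subnet.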
## DNS

While the underlying network packet flow across the network is determined using IP addresses, users and applications typically want to use well-known names to identify network destinations that remain consistent over time, even if the underlying IP addresses change. For example, to map `google.com` to `216.58.210.46`. This translation from name to IP address is handled by [DNS](https://en.wikipedia.org/wiki/Domain_Name_System). DNS runs on top of the base networking described so far. Each device connected to a network is typically configured with the IP addresses of one or more DNS servers. When an application wants to connect to a domain name, a DNS message is sent to the DNS server, which then responds with information about which IP address(es) the domain name maps to. The application can then initiate its connection to the chosen IP address.

## NAT

Network Address Translation ([NAT](https://en.wikipedia.org/wiki/Network_address_translation)) is the process of mapping an IP address in a packet to a different IP address as the packet passes through the device performing the NAT. Depending on the use case, NAT can apply to the source or destination IP address, or to both addresses.

One common use case for NAT is to allow devices with private IP addresses to talk to devices with public IP addresses across the internet. For example, if a device with a private IP address attempts to connect to a public IP address, then the router at the border of the private network will typically use SNAT (Source Network Address Translation) to map the private source IP address of the packet to the router's own public IP address before forwarding it on to the internet. The router then maps response packets coming in the opposite direction back to the original private IP address, so packets flow end-to-end in both directions, with neither source nor destination being aware the mapping is happening. The same technique is commonly used to allow devices connected to an overlay network to connect with devices outside of the overlay network.

Another common use case for NAT is load balancing. In this case the load balancer performs DNAT (Destination Network Address Translation) to change the destination IP address of the incoming connection to the IP address of the chosen device it is load balancing to. The load balancer then reverses this NAT on response packets so neither the source nor destination device is aware the mapping is happening.

## MTU

The Maximum Transmission Unit ([MTU](https://en.wikipedia.org/wiki/Maximum_transmission_unit)) of a network link is the maximum size of packet that can be sent across that network link. It is common for all links in a network to be configured with the same MTU to reduce the need to fragment packets as they traverse the network, which can significantly lower the performance of the network. In addition, TCP tries to learn path MTUs, and adjusts packet sizes for each network path based on the smallest MTU of any of the links in the network path. When an application tries to send more data than can fit in a single packet, TCP splits the data into multiple TCP segments, so the MTU is not exceeded.

Most networks have links with an MTU of 1,500 bytes, but some networks support MTUs of 9,000 bytes. In a Linux system, larger MTU sizes can result in lower CPU usage by the Linux networking stack when sending large amounts of data, because it has to process fewer packets for the same amount of data.
Depending on the network interface hardware being -used, some of this overhead may be offloaded to the network interface hardware, so the impact of small vs large MTU -sizes varies from device to device. diff --git a/calico_versioned_docs/version-3.25/about/index.mdx b/calico_versioned_docs/version-3.25/about/index.mdx deleted file mode 100644 index fe62185374..0000000000 --- a/calico_versioned_docs/version-3.25/about/index.mdx +++ /dev/null @@ -1,331 +0,0 @@ ---- -description: The value of using Calico for networking and network security for workloads and hosts. ---- - -# About Calico - -
## What is {{prodname}}?

{{prodname}} is an open source networking and network security solution for containers, virtual machines, and native host-based workloads. {{prodname}} supports a broad range of platforms including Kubernetes, OpenShift, Mirantis Kubernetes Engine (MKE), OpenStack, and bare metal services.

Whether you opt to use {{prodname}}'s eBPF data plane or Linux's standard networking pipeline, {{prodname}} delivers blazing fast performance with true cloud-native scalability. {{prodname}} provides developers and cluster operators with a consistent experience and set of capabilities whether running in public cloud or on-prem, on a single node, or across a multi-thousand node cluster.

## Why use {{prodname}}?

### Choice of dataplanes

{{prodname}} gives you a choice of dataplanes, including a pure Linux eBPF dataplane, a standard Linux networking dataplane, and a Windows HNS dataplane. Whether you prefer the cutting-edge features of eBPF, or the familiarity of the standard primitives that existing system administrators already know, Calico has you covered.

Whichever choice is right for you, you'll get the same easy-to-use base networking, network policy, and IP address management capabilities that have made Calico the most trusted networking and network policy solution for mission-critical cloud-native applications.

### Best practices for network security

{{prodname}}'s rich network policy model makes it easy to lock down communication so the only traffic that flows is the traffic you want to flow. Plus, with built-in support for WireGuard encryption, securing your pod-to-pod traffic across the network has never been easier.

{{prodname}}'s policy engine can enforce the same policy model at the host networking layer and (if using Istio & Envoy) at the service mesh layer, protecting your infrastructure from compromised workloads and protecting your workloads from compromised infrastructure.

### Performance

Depending on your preference, {{prodname}} uses either Linux eBPF or the Linux kernel's highly optimized standard networking pipeline to deliver high performance networking. {{prodname}}'s networking options are flexible enough to run without using overlays in most environments, avoiding the overheads of packet encap/decap. {{prodname}}'s control plane and policy engine has been fine-tuned over many years of production use to minimize overall CPU usage and occupancy.

### Scalability

{{prodname}}'s core design principles leverage best practice cloud-native design patterns combined with proven standards-based network protocols trusted worldwide by the largest internet carriers. The result is a solution with exceptional scalability that has been running at scale in production for years. {{prodname}}'s development test cycle includes regularly testing multi-thousand node clusters. Whether you are running a 10 node cluster, 100 node cluster, or more, you reap the benefits of the improved performance and scalability characteristics demanded by the largest Kubernetes clusters.

### Interoperability

{{prodname}} enables Kubernetes workloads and non-Kubernetes or legacy workloads to communicate seamlessly and securely. Kubernetes pods are first class citizens on your network and able to communicate with any other workload on your network. In addition, {{prodname}} can seamlessly extend to secure your existing host-based workloads (whether in public cloud or on-prem on VMs or bare metal servers) alongside Kubernetes. All workloads are subject to the same network policy model so the only traffic that is allowed to flow is the traffic you expect to flow.

### Real world production hardened

{{prodname}} is trusted and running in production at large enterprises including SaaS providers, financial services companies, and manufacturers. The largest public cloud providers have selected {{prodname}} to provide network security for their hosted Kubernetes services (Amazon EKS, Azure AKS, Google GKE, and IBM IKS) running across tens of thousands of clusters.

### Full Kubernetes network policy support

{{prodname}}'s network policy engine formed the original reference implementation of Kubernetes network policy during the development of the API. {{prodname}} is distinguished in that it implements the full set of features defined by the API, giving users all the capabilities and flexibility envisaged when the API was defined. And for users that require even more power, {{prodname}} supports an extended set of network policy capabilities that work seamlessly alongside the Kubernetes API, giving users even more flexibility in how they define their network policies.

### Contributor community

The Calico open source project is what it is today thanks to 200+ contributors across a broad range of companies. In addition, {{prodname}} is backed by Tigera, founded by the original Calico engineering team, and committed to maintaining {{prodname}} as the leading standard for Kubernetes network security.

### Calico Cloud compatible

Calico Cloud builds on top of open source Calico to provide Kubernetes security and observability features and capabilities:

- Egress access controls (DNS policies, egress gateways)
- Extend firewall to Kubernetes
- Hierarchical tiers
- FQDN / DNS based policy
- Micro-segmentation across host/VMs/containers
- Security policy preview, staging, and recommendation
- Compliance reporting and alerts
- Intrusion detection & prevention (IDS / IPS) for Kubernetes
- SIEM Integrations
- Application Layer (L7) observability
- Dynamic packet capture
- DNS dashboards
diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx
deleted file mode 100644
index 08b3c38ce7..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/about.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
---
description: Install Calico on hosts not in a cluster with network policy, or networking and network policy.
---

# About non-cluster hosts

## Big picture

Secure non-cluster hosts by installing {{prodname}} for networking and/or network policy.

## Value

Not all hosts in your environment run pods/workloads. You may have physical machines or legacy applications that you cannot move into a Kubernetes cluster, but still need to securely communicate with pods in your cluster. {{prodname}} lets you enforce policy on these **non-cluster hosts** using the same robust {{prodname}} network policy that you use for pods.

## Concepts

### Non-cluster hosts and host endpoints

A **non-cluster host** is a computer that is running an application that is _not part of a Kubernetes cluster_. Using {{prodname}} network policy, you can secure these host interfaces using **host endpoints**. Host endpoints can have labels, which work the same as labels on pods/workload endpoints.

The advantage is that you can write network policy rules to apply to both workload endpoints and host endpoints using label selectors, where each selector can refer to either type (or be a mix of the two). For example, you can write a cluster-wide policy for non-cluster hosts that is immediately applied to every host. To learn how to restrict traffic to/from hosts using {{prodname}} network policy, see [Protect hosts](../../network-policy/hosts/protect-hosts.mdx).

If you are using the etcd3 database, you can also install {{prodname}} with networking as described below.
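As a sketch of what a host endpoint looks like — the node name, interface, label, and IP below are illustrative assumptions — you might register a non-cluster host's interface like this:

```yaml
apiVersion: projectcalico.org/v3
kind: HostEndpoint
metadata:
  name: db-host-eth0          # illustrative endpoint name
  labels:
    role: database-host       # label that policies can select on
spec:
  node: db-host               # hostname the endpoint belongs to
  interfaceName: eth0         # host interface to protect
  expectedIPs:
    - 10.0.0.20               # illustrative IP on the interface
```

A {{prodname}} policy with `selector: role == 'database-host'` would then apply to this interface just as it would to a pod.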
### Install options for non-cluster hosts

| Install {{prodname}} with... | Requires | Use case | Supported install methods |
| ---------------------------- | -------- | -------- | ------------------------- |
| Policy only | An etcd3 or Kubernetes datastore | Use {{prodname}} network policy to control firewalls on non-cluster hosts. | Binary install with/without a package manager |
| Networking and network policy | An etcd3 datastore | **Networking**<br/>Use {{prodname}} networking (BGP, or overlay with VXLAN or IP-in-IP) to handle these communications:<br/>- pod ↔ pod<br/>- pod ↔ host<br/><br/>**Note**: {{prodname}} does not handle host ↔ host networking; your underlying network must already be set up to handle this.<br/><br/>**Policy**<br/>
    Use {{prodname}} network policy to control firewalls on your non-cluster hosts. | Docker container | - -## Before you begin - -**Supported** - -- All platforms in this release, except Windows - -**Required** - -- Non-cluster host meets [system requirements](requirements.mdx) for {{prodname}}. If you want to use a package manager for installation, the non-cluster host must be a system derived from Ubuntu or RedHat. -- Set up a datastore; if {{prodname}} is installed on a cluster, you already have a datastore -- Install `kubectl` or [`calicoctl`](../../operations/calicoctl/index.mdx). (`kubectl` works only with the Kubernetes datastore.) - -## Next steps - -Select an install method. - -:::note - -{{prodname}} must be installed on each non-cluster host that you want to control with networking and/or policy. - -::: - -| Install method | Networking | Policy | -| ------------------------------------------------------------------ | ---------- | ------ | -| [Docker container](installation/container.mdx) | ✓ | ✓ | -| [Binary install with package manager](installation/binary-mgr.mdx) | | ✓ | -| [Binary install without package manager](installation/binary.mdx) | | ✓ | diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx deleted file mode 100644 index c854afe88d..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico on hosts to secure host communications. -hide_table_of_contents: true ---- - -# Non-cluster hosts - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx deleted file mode 100644 index 587b38de48..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary-mgr.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -description: Install Calico on non-cluster host using a package manager. ---- - -# Binary install with package manager - -import FelixInitDatastore from '@site/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx'; - -## Big picture - -Install {{prodname}} on non-cluster hosts using a package manager. - -## Value - -Packaged binaries of {{prodname}} are easy to consume and upgrade. This method automatically configures the init system to keep Felix running. - -## Before you begin... - -1. Ensure the {{prodname}} datastore is up and accessible from the host -1. Ensure the host meets the minimum [system requirements](../requirements.mdx) -1. If your system is not an Ubuntu- or RedHat-derived system, you will need to choose a different install method. -1. If you want to install {{prodname}} with networking (so that you can communicate with cluster workloads), you should choose the [container install method](container.mdx) -1. Install `kubectl` (for Kubernetes datastore) or [Install and configure `calicoctl`](../../../operations/calicoctl/index.mdx) for etcd3 datastore. - -## How to - -This guide covers installing Felix, the {{prodname}} daemon that handles network policy. 
- -### Step 1: Install binaries - -```bash -sudo add-apt-repository ppa:project-calico/{{ ppa_repo_name }} -sudo apt-get update -sudo apt-get upgrade -sudo apt-get install calico-felix -``` - -_RPM requires_: RedHat 7-derived distribution - -```bash -cat > /etc/yum.repos.d/calico.repo < - -Modify the included init system unit to include the `EnvironmentFile`. For example, on systemd, add the following line to the `[Service]` section of the `calico-felix` unit. - -```bash -EnvironmentFile=/etc/calico/calico.env -``` - -### Step 3: Initialize the datastore - - diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx deleted file mode 100644 index 4f962884b5..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/binary.mdx +++ /dev/null @@ -1,117 +0,0 @@ ---- -description: Install Calico binary on non-cluster hosts without a package manager. ---- - -# Binary install without package manager - -import FelixInitDatastore from '@site/calico_versioned_docs/version-3.25/_includes/content/_felix-init-datastore.mdx'; - -## Big picture - -Install {{prodname}} binary on non-cluster hosts without a package manager. - -## Value - -Install {{prodname}} directly when a package manager isn't available, or your provisioning system can easily handle copying binaries to hosts. - -## Before you begin... - -1. Ensure the {{prodname}} datastore is up and accessible from the host -1. Ensure the host meets the minimum [system requirements](../requirements.mdx) -1. If you want to install {{prodname}} with networking (so that you can communicate with cluster workloads), you should choose the [container install method](container.mdx) -1. Install `kubectl` (for Kubernetes datastore) or [Install and configure `calicoctl`](../../../operations/calicoctl/index.mdx) for etcd3 datastore. - -## How to - -This guide covers installing Felix, the {{prodname}} daemon that handles network policy. - -### Step 1: Download and extract the binary - -This step requires Docker, but it can be run from any machine with Docker installed. It doesn't have to be the host you will run it on (i.e your laptop is fine). - -1. Use the following command to download the {{nodecontainer}} image. - - ```bash - docker pull {{nodecontainer}}:{{releases.0.components.calico/node.version}} - ``` - -1. Confirm that the image has loaded by typing `docker images`. - - ```bash - REPOSITORY TAG IMAGE ID CREATED SIZE - {{nodecontainer}} {{releases.0.components.calico/node.version}} e07d59b0eb8a 2 minutes ago 42MB - ``` - -1. Create a temporary {{nodecontainer}} container. - - ```bash - docker create --name container {{nodecontainer}}:{{releases.0.components.calico/node.version}} - ``` - -1. Copy the calico-node binary from the container to the local file system. - - ```bash - docker cp container:/bin/calico-node calico-node - ``` - -1. Delete the temporary container. - - ```bash - docker rm container - ``` - -1. Set the extracted binary file to be executable. - - ``` - chmod +x calico-node - chown root:root calico-node - ``` - -### Step 2: Copy the `calico-node` binary - -Copy the binary from Step 1 to the target machine, using any means (`scp`, `ftp`, USB stick, etc.). - -### Step 3: Create environment file - - - -### Step 4: Create a start-up script - -Felix should be started at boot by your init system and the init system -**must** be configured to restart Felix if it stops. 
Felix relies on -that behavior for certain configuration changes. - -If your distribution uses systemd, then you could use the following unit -file: - -```bash -[Unit] -Description=Calico Felix agent -After=syslog.target network.target - -[Service] -User=root -EnvironmentFile=/etc/calico/calico.env -ExecStartPre=/usr/bin/mkdir -p /var/run/calico -ExecStart=/usr/local/bin/calico-node -felix -KillMode=process -Restart=on-failure -LimitNOFILE=32000 - -[Install] -WantedBy=multi-user.target -``` - -Once you've configured Felix, start it up via your init system. - -```bash -service calico-felix start -``` - -### Step 5: Initialize the datastore - - diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx deleted file mode 100644 index 8c29b1c3d5..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/container.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -description: Install Calico on non-cluster hosts using a Docker container. ---- - -# Docker container install - -import DockerContainerService from '@site/calico_versioned_docs/version-3.25/_includes/content/_docker-container-service.mdx'; - -## Big picture - -Install {{prodname}} on non-cluster hosts using a Docker container for both networking and policy. - -## Value - -Installing {{prodname}} with a Docker container includes everything you need for both networking and policy. It also automatically adds the appropriate per-node configuration to the datastore. - -## Before you begin... - -1. Ensure Docker is installed -1. Ensure the {{prodname}} datastore is up and accessible from the host -1. Ensure the host meets the minimum [system requirements](../requirements.mdx) - -## How to - -The `{{nodecontainer}}` container should be started at boot time by your init system and the init system must be configured to restart it if stopped. {{prodname}} relies on that behavior for certain configuration changes. - - diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx deleted file mode 100644 index ae7bd06532..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/installation/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico on hosts to secure host communications. -hide_table_of_contents: true ---- - -# Install on non-cluster hosts - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx deleted file mode 100644 index 965123df80..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/bare-metal/requirements.mdx +++ /dev/null @@ -1,9 +0,0 @@ ---- -description: Review node requirements for installing Calico. 
---- - -# System requirements - - - - diff --git a/calico_versioned_docs/version-3.25/getting-started/index.mdx b/calico_versioned_docs/version-3.25/getting-started/index.mdx deleted file mode 100644 index 1100c10605..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico on nodes and hosts for popular orchestrators, and install the calicoctl command line interface (CLI) tool. -hide_table_of_contents: true ---- - -# Install Calico - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx deleted file mode 100644 index 16fa32322f..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Use Calico network policy on top of flannel networking. -hide_table_of_contents: true ---- - -# Flannel - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx deleted file mode 100644 index 25911d385f..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/install-for-flannel.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -description: If you use flannel for networking, you can install Calico network policy to secure cluster communications. ---- - -# Install Calico for policy and flannel (aka Canal) for networking - -## Before you begin - -:::note - -Calico includes native VXLAN capabilities without the need for flannel. If you're planning on using flannel for VXLAN, we recommend instead installing Calico using IP-in-IP or VXLAN mode. See how to [determine the best networking option](../../../networking/determine-best-networking.mdx) for your cluster. -If you're already using flannel for networking, you can [migrate your existing clusters to Calico networking](migration-from-flannel.mdx). - -::: - -Ensure that you have a Kubernetes cluster that meets the -{{prodname}} [system requirements](../requirements.mdx). If you don't, -follow the steps in [Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). - -## Installing {{prodname}} for policy and flannel (aka Canal) for networking - -### Selecting a datastore type - -The procedure differs according to your datastore type. Refer to the section that matches your type. - -- [Kubernetes API datastore](#installing-with-the-kubernetes-api-datastore-recommended) (recommended) - -- [etcd datastore](#installing-with-the-etcd-datastore) - -### Installing with the Kubernetes API datastore (recommended) - -1. Ensure that the Kubernetes controller manager has the following flags - set:
   - `--cluster-cidr=<your-pod-cidr>` and `--allocate-node-cidrs=true`.

   :::tip

   On kubeadm, you can pass `--pod-network-cidr=<your-pod-cidr>` to kubeadm to set both Kubernetes controller flags.

   :::

1. Download the flannel networking manifest for the Kubernetes API datastore.

   ```bash
   curl {{manifestsUrl}}/manifests/canal.yaml -O
   ```

1. If your cluster is configured to use pod CIDR `10.244.0.0/16`, skip to the next step. If your cluster is configured to use a different pod CIDR, replace `10.244.0.0/16` in the downloaded manifest with the correct pod CIDR.

1. Issue the following command to install {{prodname}}.

   ```bash
   kubectl apply -f canal.yaml
   ```

1. If you wish to enforce application layer policies and secure workload-to-workload communications with mutual TLS authentication, continue to [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx) (optional).

The geeky details of what you get:

### Installing with the etcd datastore

We strongly recommend using the Kubernetes API datastore, but if you prefer to use etcd, complete the following steps.

1. Download the {{prodname}} networking manifest.

   ```bash
   curl {{manifestsUrl}}/manifests/canal-etcd.yaml -O
   ```

1. If your cluster is configured to use pod CIDR `10.244.0.0/16`, skip to the next step. If your cluster is configured to use a different pod CIDR, replace `10.244.0.0/16` in the downloaded manifest with the correct pod CIDR.

1. In the `ConfigMap` named `calico-config`, set the value of `etcd_endpoints` to the IP address and port of your etcd server.

   :::tip

   You can specify more than one using commas as delimiters.

   :::

1. Apply the manifest using the following command.

   ```bash
   kubectl apply -f canal-etcd.yaml
   ```

1. If you wish to enforce application layer policies and secure workload-to-workload communications with mutual TLS authentication, continue to [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx) (optional).

The geeky details of what you get:

diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx
deleted file mode 100644
index 77a35b4438..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/flannel/migration-from-flannel.mdx
+++ /dev/null
@@ -1,193 +0,0 @@
---
description: Preserve your existing VXLAN networking in Calico, but take full advantage of Calico IP address management (IPAM) and advanced network policy features.
---

# Migrate a Kubernetes cluster from flannel/Canal to Calico

## Big picture

Migrate an existing Kubernetes cluster with flannel/Canal to {{prodname}} networking.

## Value

If you are already using flannel for networking, it is easy to migrate to {{prodname}}'s native VXLAN networking. {{prodname}} VXLAN is fully equivalent to flannel vxlan, but you get the benefits of the broader range of features offered by {{prodname}} with an active maintainer community.

## Concepts

### Limitations of host-local IPAM in flannel

Flannel networking uses the host-local IPAM (IP address management) CNI plugin, which provides simple IP address management for your cluster. Although simple, it has limitations:

- When you create a node, it is pre-allocated a CIDR. If the number of pods per node exceeds the number of IP addresses available per node, you must recreate the cluster.
Conversely, if the number of pods is much smaller than the number of addresses available per node, IP address space is not efficiently used; as you scale out and IP addresses are depleted, inefficiencies become a pain point. - -- Because each node has a pre-allocated CIDR, pods must always have an IP address assigned based on the node it is running on. Being able to allocate IP addresses based on other attributes (for example, the pod’s namespace), provides flexibility to meet use cases that arise. - -Migrating to {{prodname}} IPAM solves these use cases and more. For advantages of Calico IPAM, see [Blog: Live Migration from Flannel to Calico](https://www.projectcalico.org/live-migration-from-flannel-to-calico/). - -### Methods for migrating to {{prodname}} networking - -There are two ways to switch your cluster to use {{prodname}} networking. Both methods give you a fully-functional {{prodname}} cluster using VXLAN networking between pods. - -- **Create a new cluster using {{prodname}} and migrate existing workloads** - - If you have the ability to migrate workloads from one cluster to the next without caring about downtime, this is the easiest method: [create a new cluster using {{prodname}}](../quickstart.mdx). - -- **Live migration on an existing cluster** - - If your workloads are already in production, or downtime is not an option, use the live migration tool that performs a rolling update of each node in the cluster. - -## Before you begin... - -**Required** - -- A cluster with flannel for networking using the VXLAN backend. -- Flannel version v0.9.1 or higher (Canal version v3.7.0 or greater). -- Flannel must have been installed using a **Kubernetes daemon set** and configured: - - To use the Kubernetes API for storing its configuration (as opposed to etcd) - - With `DirectRouting` disabled (default) -- Cluster must allow for: - - Adding/deleting/modifying node labels - - Modifying and deleting of the flannel daemon set. For example, it must not be installed using the Kubernetes Addon-manager. - -## How to - -- [Migrate from flannel networking to Calico networking, live migration](#migrate-from-flannel-networking-to-calico-networking-live-migration) -- [Modify flannel configuration](#modify-flannel-configuration) -- [View migration status](#view-migration-status) -- [View migration logs](#view-migration-logs) -- [Revert migration](#revert-migration) - -### Migrate from flannel networking to Calico networking, live migration - -1. Install {{prodname}}. - - ``` - kubectl apply -f {{manifestsUrl}}/manifests/flannel-migration/calico.yaml - ``` - -1. Start the migration controller. - - ``` - kubectl apply -f {{manifestsUrl}}/manifests/flannel-migration/migration-job.yaml - ``` - - You will see nodes begin to update one at a time. - -1. Monitor the migration. - - ``` - kubectl get jobs -n kube-system flannel-migration - ``` - - When the host node is upgraded, the migration controller may be rescheduled several times. The installation is complete when the output of the above command shows 1/1 completions. For example: - - ``` - NAME COMPLETIONS DURATION AGE - flannel-migration 1/1 2m59s 5m9s - ``` - -1. Delete the migration controller. - - ``` - kubectl delete -f {{manifestsUrl}}/manifests/flannel-migration/migration-job.yaml - ``` - -### Modify flannel configuration - -The migration controller autodetects your flannel configuration, and in most cases, does not require -additional configuration. 
If you require special configuration, the migration tool provides the following options, which can be set as environment variables within the pod.

| Configuration options | Description | Default |
| --------------------- | ----------- | ------- |
| FLANNEL_NETWORK | IPv4 network CIDR used by flannel for the cluster. | Automatically detected |
| FLANNEL_IPV6_NETWORK | IPv6 network CIDR used by flannel for the cluster. | Automatically detected |
| FLANNEL_DAEMONSET_NAME | Name of the flannel daemon set in the kube-system namespace. | kube-flannel-ds |
| FLANNEL_MTU | MTU for the flannel VXLAN device. | Automatically detected |
| FLANNEL_IP_MASQ | Whether masquerading is enabled for outbound traffic. | Automatically detected |
| FLANNEL_SUBNET_LEN | Per-node IPv4 subnet length used by flannel. | 24 |
| FLANNEL_IPV6_SUBNET_LEN | Per-node IPv6 subnet length used by flannel. | 64 |
| FLANNEL_ANNOTATION_PREFIX | Value provided via the kube-annotation-prefix option to flannel. | flannel.alpha.coreos.com |
| FLANNEL_VNI | The VNI used for the flannel network. | 1 |
| FLANNEL_PORT | UDP port used for VXLAN. | 8472 |
| CALICO_DAEMONSET_NAME | Name of the calico daemon set in the kube-system namespace. | calico-node |
| CNI_CONFIG_DIR | Full path on the host in which to search for CNI config files. | /etc/cni/net.d |

### View migration status

View the controller's current status.

```
kubectl get pods -n kube-system -l k8s-app=flannel-migration-controller
```

### View migration logs

View migration logs to see if any actions are required.

```
kubectl logs -n kube-system -l k8s-app=flannel-migration-controller
```

### Revert migration

If you need to revert a cluster from {{prodname}} back to flannel, follow these steps.

1. Remove the migration controller and {{prodname}}.

   ```
   kubectl delete -f {{manifestsUrl}}/manifests/flannel-migration/migration-job.yaml
   kubectl delete -f {{manifestsUrl}}/manifests/flannel-migration/calico.yaml
   ```

1. Determine the nodes that were migrated to {{prodname}}.

   ```
   kubectl get nodes -l projectcalico.org/node-network-during-migration=calico
   ```

Then, for each node found above, run the following commands to delete Calico.

1. Cordon and drain the node.

   ```
   kubectl drain <node-name>
   ```

1. Log in to the node and remove the CNI configuration.

   ```
   rm /etc/cni/net.d/10-calico.conflist
   ```

1. Reboot the node.

1. Enable flannel on the node.

   ```
   kubectl label node <node-name> projectcalico.org/node-network-during-migration=flannel --overwrite
   ```

1. Uncordon the node.

   ```
   kubectl uncordon <node-name>
   ```

After the above steps have been completed on each node, perform the following steps.

1. Remove the `nodeSelector` from the flannel daemonset.

   ```
   kubectl patch ds/kube-flannel-ds -n kube-system -p '{"spec": {"template": {"spec": {"nodeSelector": null}}}}'
   ```

1. Remove the migration label from all nodes.
- - ``` - kubectl label node --all projectcalico.org/node-network-during-migration- - ``` - -## Next steps - -Learn about [{{prodname}} IP address management](../../../networking/ipam/index.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx deleted file mode 100644 index f923d1c55e..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/configure-bgp-peering.mdx +++ /dev/null @@ -1,160 +0,0 @@ ---- -description: Quick review of BGP peering options. ---- - -# Configure BGP peering - -We have configured {{prodname}} to distribute routing information over the -Border Gateway Protocol (BGP). This scalable protocol powers routing on the global -public Internet. - -In many on-premise data centers, each server connects to a top-of-rack (ToR) router -operating at the IP layer (layer 3). In that situation, we would need to peer each node -with its corresponding ToR router, so that the ToR learns routes to the containers. That -configuration is beyond the scope of this guide. - -Since we are running in an AWS VPC within a single subnet, the hosts have Ethernet (layer 2) -connectivity with one another, meaning there are no routers between them. Thus, they can peer -directly with each other. - -On one of the nodes in your cluster where you have `calicoctl` installed, check the status. - -```bash -sudo calicoctl node status -``` - -Result - -``` -Calico process is running. - -IPv4 BGP status -+---------------+-------------------+-------+----------+-------------+ -| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO | -+---------------+-------------------+-------+----------+-------------+ -| 172.31.40.217 | node-to-node mesh | up | 17:38:47 | Established | -| 172.31.40.30 | node-to-node mesh | up | 17:40:09 | Established | -| 172.31.45.29 | node-to-node mesh | up | 17:40:20 | Established | -| 172.31.37.123 | node-to-node mesh | up | 17:40:29 | Established | -+---------------+-------------------+-------+----------+-------------+ - -IPv6 BGP status -No IPv6 peers found. -``` - -Alternatively, you can create a [`CalicoNodeStatus` resource](../../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node. - -Notice there are four BGP sessions, one to each other node in the cluster. In a small cluster, this -works well and is highly resilient. However, the total number of BGP sessions scales as the square -of the number of nodes, and in a large cluster this creates a lot of overhead. - -In this lab we will configure a fixed number of _route reflectors_. Route reflectors announce their -own routes and the routes they receive from other peers. This means nodes only need to peer with the -route reflectors to get all the routes in the cluster. This peering arrangement means that the number -of BGP sessions scales linearly with the number of nodes. - -## Choose and label nodes - -We will establish three route reflectors, which means we avoid a single point of failure even if we take down -a route reflector node for maintenance. In a five node cluster that means that only one BGP session is not -needed, since the two non-reflector nodes don't need to peer with one another, but it will save lots of overhead -in a large cluster. - -Choose three nodes and perform the following for each of them. - -Save the node YAML. 
- -```bash -calicoctl get node -o yaml --export > node.yaml -``` - -Edit the YAML to add - -```yaml -metadata: - labels: - calico-route-reflector: '' -spec: - bgp: - routeReflectorClusterID: 224.0.0.1 -``` - -Reapply the YAML - -```bash -calicoctl apply -f node.yaml -``` - -## Configure peering - -Configure all non-reflector nodes to peer with all route reflectors - -```bash -calicoctl apply -f - < pool1.yaml < pool2.yaml < diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx deleted file mode 100644 index 1ce709ee69..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/install-cni-plugin.mdx +++ /dev/null @@ -1,197 +0,0 @@ ---- -description: Steps to install the Calico Container Network Interface (CNI) ---- - -# Install CNI plugin - -Kubernetes uses the Container Network Interface (CNI) to interact with networking providers like {{prodname}}. -The {{prodname}} binary that presents this API to Kubernetes is called the **CNI plugin** and must be installed -on every node in the Kubernetes cluster. - -To understand how the Container Network Interface (CNI) works with Kubernetes, and how it enhances Kubernetes networking, read our [Kubernetes CNI guide](https://www.tigera.io/learn/guides/kubernetes-networking/kubernetes-cni/). - -## Provision Kubernetes user account for the plugin - -The CNI plugin interacts with the Kubernetes API server while creating pods, both to obtain additional information -and to update the datastore with information about the pod. - -On the Kubernetes control plane node, create a key for the CNI plugin to authenticate with and certificate signing request. - -```bash -openssl req -newkey rsa:4096 \ - -keyout cni.key \ - -nodes \ - -out cni.csr \ - -subj "/CN=calico-cni" -``` - -We will sign this certificate using the main Kubernetes CA. - -```bash -sudo openssl x509 -req -in cni.csr \ - -CA /etc/kubernetes/pki/ca.crt \ - -CAkey /etc/kubernetes/pki/ca.key \ - -CAcreateserial \ - -out cni.crt \ - -days 365 -sudo chown $(id -u):$(id -g) cni.crt -``` - -Next, we create a kubeconfig file for the CNI plugin to use to access Kubernetes. Copy this `cni.kubeconfig` file **to every node** in the cluster. - -```bash -APISERVER=$(kubectl config view -o jsonpath='{.clusters[0].cluster.server}') -kubectl config set-cluster kubernetes \ - --certificate-authority=/etc/kubernetes/pki/ca.crt \ - --embed-certs=true \ - --server=$APISERVER \ - --kubeconfig=cni.kubeconfig - -kubectl config set-credentials calico-cni \ - --client-certificate=cni.crt \ - --client-key=cni.key \ - --embed-certs=true \ - --kubeconfig=cni.kubeconfig - -kubectl config set-context default \ - --cluster=kubernetes \ - --user=calico-cni \ - --kubeconfig=cni.kubeconfig - -kubectl config use-context default --kubeconfig=cni.kubeconfig -``` - -## Provision RBAC - -Define a cluster role the CNI plugin will use to access Kubernetes. - -```bash -kubectl apply -f - < /etc/cni/net.d/10-calico.conflist <` -1. Copy admin credentials -1. Test Access - - 1. Run - - `kubectl get nodes` - - Verify all nodes have joined. At this point nodes have joined but they are in `NotReady` state, because Kubernetes can't find a networking provider and configuration. 
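At this stage, a quick check confirms the expected state. For example, on the five-node cluster used in this guide, `kubectl get nodes` returns something like the following (node names match the example cluster; the roles, ages, and versions shown here are illustrative and will differ in your environment):

```
NAME               STATUS     ROLES    AGE     VERSION
ip-172-31-37-123   NotReady   master   2m35s   v1.17.2
ip-172-31-40-217   NotReady   <none>   93s     v1.17.2
ip-172-31-40-30    NotReady   <none>   88s     v1.17.2
ip-172-31-42-47    NotReady   <none>   84s     v1.17.2
ip-172-31-45-29    NotReady   <none>   80s     v1.17.2
```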
- -## Next - -[The Calico datastore](the-calico-datastore.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx deleted file mode 100644 index 3d260dadd4..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-network-policy.mdx +++ /dev/null @@ -1,13 +0,0 @@ ---- -description: Verify that network policy works correctly. ---- - -# Test network policy - -In this lab we will test network policy. - -Follow the instructions in the [Simple policy tutorial](../../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx) to verify policy works correctly. - -## Next - -[End user RBAC](end-user-rbac.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx deleted file mode 100644 index 0739244287..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/test-networking.mdx +++ /dev/null @@ -1,172 +0,0 @@ ---- -description: Test that networking works correctly. ---- - -# Test networking - -In this lab we will test the {{prodname}} cluster to demonstrate networking is working correctly. - -## Pod to pod pings - -Create three busybox instances - -```bash -kubectl create deployment pingtest --image=busybox --replicas=3 -- sleep infinity -``` - -Check their IP addresses - -```bash -kubectl get pods --selector=app=pingtest --output=wide -``` - -Result - -``` -NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES -pingtest-b4b6f8cf-b5z78 1/1 Running 0 3m28s 192.168.38.128 ip-172-31-37-123 -pingtest-b4b6f8cf-jmzq6 1/1 Running 0 3m28s 192.168.45.193 ip-172-31-40-217 -pingtest-b4b6f8cf-rn9nm 1/1 Running 0 3m28s 192.168.60.64 ip-172-31-45-29 -``` - -Note the IP addresses of the second two pods, then exec into the first one. For example - -```bash -kubectl exec -ti pingtest-b4b6f8cf-b5z78 -- sh -``` - -From inside the pod, ping the other two pod IP addresses. For example - -```bash -ping 192.168.45.193 -c 4 -``` - -Result - -``` -PING 192.168.45.193 (192.168.45.193): 56 data bytes -64 bytes from 192.168.45.193: seq=0 ttl=62 time=1.847 ms -64 bytes from 192.168.45.193: seq=1 ttl=62 time=0.684 ms -64 bytes from 192.168.45.193: seq=2 ttl=62 time=0.488 ms -64 bytes from 192.168.45.193: seq=3 ttl=62 time=0.442 ms - ---- 192.168.45.193 ping statistics --- -4 packets transmitted, 4 packets received, 0% packet loss -round-trip min/avg/max = 0.442/0.865/1.847 ms -``` - -## Check routes - -From one of the nodes, verify that routes exist to each of the `pingtest` pods' IP addresses. For example - -```bash -ip route get 192.168.38.128 -``` - -Result - -``` -192.168.38.128 via 172.31.37.123 dev eth0 src 172.31.42.47 uid 1000 - cache -``` - -The `via 172.31.37.123` in this example indicates the next-hop for this pod IP, which matches the IP address of the node the -pod is scheduled on, as expected. - -## IPAM allocations from different pools - -Recall that we created two IP pools, but left one disabled. - -```bash -calicoctl get ippools -o wide -``` - -Result - -``` -NAME CIDR NAT IPIPMODE VXLANMODE DISABLED SELECTOR -pool1 192.168.0.0/18 true Never Never false all() -pool2 192.168.192.0/19 true Never Never true all() -``` - -Enable the second pool. 
```bash
calicoctl apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: pool2
spec:
  cidr: 192.168.192.0/19
  ipipMode: Never
  natOutgoing: true
  disabled: false
  nodeSelector: all()
EOF
```

Create a pod that explicitly requests an address from `pool2`.

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: pingtest-pool2
  annotations:
    cni.projectcalico.org/ipv4pools: '["pool2"]'
spec:
  containers:
    - name: pingtest
      image: busybox
      command: ['sleep', 'infinity']
EOF
```

Verify the pod received an address from the second pool, and note that address.

```bash
kubectl get pod pingtest-pool2 -o wide
```

From one of the original pingtest pods, ping the IP address (in this example, `192.168.219.0`, which falls within `pool2`).

```bash
ping 192.168.219.0 -c 4
```

Result

```
PING 192.168.219.0 (192.168.219.0): 56 data bytes
64 bytes from 192.168.219.0: seq=0 ttl=62 time=0.524 ms
64 bytes from 192.168.219.0: seq=1 ttl=62 time=0.459 ms
64 bytes from 192.168.219.0: seq=2 ttl=62 time=0.505 ms
64 bytes from 192.168.219.0: seq=3 ttl=62 time=0.492 ms

--- 192.168.219.0 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.459/0.495/0.524 ms
```

## Clean up

```bash
kubectl delete deployments.apps pingtest
kubectl delete pod pingtest-pool2
```

## Next

[Test network policy](test-network-policy.mdx)

diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx
deleted file mode 100644
index 69e9ca32c2..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/hardway/the-calico-datastore.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
---
description: The central datastore for your clusters' operational and configuration state.
---

# The Calico datastore

{{prodname}} stores data about the operational and configuration state of your cluster in a central datastore. If the datastore is unavailable, your {{prodname}} network continues operating, but cannot be updated (no new pods can be networked, no policy changes can be applied, etc.).

{{prodname}} has two datastore drivers you can choose from:

- **etcd** - for direct connection to an etcd cluster
- **Kubernetes** - for connection to a Kubernetes API server

## Using Kubernetes as the datastore

This guide uses the Kubernetes API datastore driver. The advantages of this driver when using {{prodname}} on Kubernetes are:

- Doesn't require an extra datastore, so is simpler to manage
- You can use Kubernetes RBAC to control access to {{prodname}} resources
- You can use Kubernetes audit logging to generate audit logs of changes to {{prodname}} resources

For completeness, the advantages of the etcd driver are:

- Allows you to run {{prodname}} on non-Kubernetes platforms (e.g. OpenStack)
- Allows separation of concerns between Kubernetes and {{prodname}} resources, for example allowing you to scale the datastores independently
- Allows you to run a {{prodname}} cluster that contains more than just a single Kubernetes cluster, for example, bare metal servers with {{prodname}} host protection interworking with a Kubernetes cluster; or multiple Kubernetes clusters.

## Custom Resources

When using the Kubernetes API datastore driver, most {{prodname}} resources are stored as [Kubernetes custom resources](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/).

A few {{prodname}} resources are not stored as custom resources and instead are backed by corresponding native Kubernetes resources. For example, [workload endpoints](../../../reference/resources/workloadendpoint.mdx) are Kubernetes pods.

To use Kubernetes as the {{prodname}} datastore, we need to define the custom resources {{prodname}} uses.

Download the list of {{prodname}} custom resource definitions and examine it in a file editor.

```bash
wget {{manifestsUrl}}/manifests/crds.yaml
```

Create the custom resource definitions in Kubernetes.
- -```bash -kubectl apply -f crds.yaml -``` - -## calicoctl - -To interact directly with the {{prodname}} datastore, use the `calicoctl` client tool. - -### Install - -1. Download the `calicoctl` binary to a Linux host with access to Kubernetes. - - ```bash - wget -O calicoctl https://github.com/projectcalico/calico/releases/latest/download/calicoctl-linux-amd64 - chmod +x calicoctl - sudo mv calicoctl /usr/local/bin/ - ``` - -1. Configure `calicoctl` to access Kubernetes. - - ```bash - export KUBECONFIG=/path/to/your/kubeconfig - export DATASTORE_TYPE=kubernetes - ``` - - On most systems, kubeconfig is located at `~/.kube/config`. You may wish to add the `export` lines to your `~/.bashrc` so they will persist when you log in next time. - -### Test - -Verify `calicoctl` can reach your datastore by running - -```bash -calicoctl get nodes -``` - -You should see output similar to - -```bash -NAME -ip-172-31-37-123 -ip-172-31-40-217 -ip-172-31-40-30 -ip-172-31-42-47 -ip-172-31-45-29 -``` - -Nodes are backed by the Kubernetes node object, so you should see names that match `kubectl get nodes`. - -Try to get an object backed by a custom resource - -```bash -calicoctl get ippools -``` - -You should see an empty result - -```bash -NAME CIDR SELECTOR - -``` - -## Next - -[Configure IP pools](configure-ip-pools.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx deleted file mode 100644 index 4d36fb6adb..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/helm.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -description: Install Calico on a Kubernetes cluster using Helm 3. ---- - -# Install using Helm - -## Big picture - -Install {{prodname}} on a Kubernetes cluster using Helm 3. - -## Value - -Helm charts are a way to package up an application for Kubernetes (similar to `apt` or `yum` for operating systems). Helm is also used by tools like ArgoCD to manage applications in a cluster, taking care of install, upgrade (and rollback if needed), etc. - -## Before you begin - -**Required** - -- Install Helm 3 -- Kubernetes cluster meets these requirements: - - Kubernetes is installed _without_ a CNI plugin **OR** cluster is running a compatible CNI for {{prodname}} to run in policy-only mode - - x86-64, arm64, ppc64le, or s390x processors - - RedHat Enterprise Linux 7.x+, CentOS 7.x+, Ubuntu 18.04+, or Debian 9.x+ -- `kubeconfig` is configured to work with your cluster (check by running `kubectl get nodes`) -- {{prodname}} can manage `cali` and `tunl` interfaces on the hosts. - If NetworkManager is present on the hosts, refer to - [Configure NetworkManager](../../operations/troubleshoot/troubleshooting.mdx#configure-networkmanager). - -## Concepts - -### Operator based installation - -In this guide, you install the Tigera {{prodname}} operator and custom resource definitions using the Helm 3 chart. The Tigera operator provides lifecycle management for {{prodname}} exposed via the Kubernetes API defined as a custom resource definition. - -## How to - -### Download the Helm chart - -1. Add the {{prodname}} helm repo: - -```bash -helm repo add projectcalico https://docs.tigera.io/calico/charts -``` - -### Customize the Helm chart - -If you are installing on a cluster installed by EKS, GKE, AKS or Mirantis Kubernetes Engine (MKE), or you need to customize TLS certificates, you **must** customize this Helm chart by creating a `values.yaml` file. Otherwise, you can skip this step. 
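If you want to see the full set of values the chart accepts before writing your own, you can render its defaults first (a quick sketch; it assumes the `projectcalico` repo added in the previous step):

```bash
# Dump the chart's default values to a file you can use as a reference
# when writing your own values.yaml.
helm show values projectcalico/tigera-operator --version {{releaseTitle}} > default-values.yaml
```

The numbered steps below walk through the most common customizations.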
- -1. If you are installing on a cluster installed by EKS, GKE, AKS or Mirantis Kubernetes Engine (MKE), set the `kubernetesProvider` as described in the [Installation reference](../../reference/installation/api.mdx#operator.tigera.io/v1.Provider). For example: - - ``` - echo '{ installation: {kubernetesProvider: EKS }}' > values.yaml - ``` - - For Azure AKS cluster with no Kubernetes CNI pre-installed, create `values.yaml` with the following command: - - ``` - cat > values.yaml < diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx deleted file mode 100644 index 2b9ed15c57..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Get Calico up and running in your K3s cluster. -hide_table_of_contents: true ---- - -# K3s - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx deleted file mode 100644 index 1c85ce7b3d..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/multi-node-install.mdx +++ /dev/null @@ -1,222 +0,0 @@ ---- -description: Install Calico on a multi node K3s cluster for testing or development. ---- - -# K3s multi-node install - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -This tutorial gets you a multi node K3s cluster with {{prodname}} in approximately 10 minutes. - -## Value - -K3s is a lightweight implementation of Kubernetes packaged as a single binary. - -The geeky details of what you get: - - - -## Before you begin - -- Make sure you have a linux host that meets the following requirements - - x86-64 processor - - 1CPU - - 1GB Ram - - 10GB free disk space - - Ubuntu 18.04 (amd64), Ubuntu 20.04 (amd64) - -:::note - -K3s supports ARM processors too, this tutorial was tested against x86-64 processor environment. -For more detail please visit [this link](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#operating-systems). - -::: - -## How to - -### Initializing control plane instance - -K3s installation script can be modified by [environment variables](https://rancher.com/docs/k3s/latest/en/installation/install-options/#options-for-installation-with-script). Here you are providing some extra arguments to disable `flannel`, disable k3s default network policy and change the pod ip CIDR. - -:::note - -Full list of arguments can be viewed [at this link](https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/). - -::: - - curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--flannel-backend=none --disable-network-policy --cluster-cidr=192.168.0.0/16" sh - - -:::caution - -If 192.168.0.0/16 is already in use within your network you must select a different pod network -CIDR by replacing 192.168.0.0/16 in the above command. - -::: - -### Enable remote access to your K3s instance - -To set up remote access to your cluster first ensure you have installed `kubectl` on your system. - -:::note - -If you are not sure how to install kubectl in your OS [visit this link](https://kubernetes.io/docs/tasks/tools/install-kubectl/). 
:::

K3s stores a kubeconfig file on your server at `/etc/rancher/k3s/k3s.yaml`. Copy the contents of `k3s.yaml` from your server into `~/.kube/config` on the system from which you want remote access to the cluster.

### Add extra nodes to K3s cluster

To add additional nodes to your cluster, you need two pieces of information.

- `K3S_URL`, which points at your main node's IP address.
- `K3S_TOKEN`, which is stored in the `/var/lib/rancher/k3s/server/node-token` file on the main node [(Step 1)](#initializing-control-plane-instance).

Execute the following command on each node instance to join it to the cluster.

:::note

Remember to change `serverip` and `mytoken`.

:::

```bash
curl -sfL https://get.k3s.io | K3S_URL=https://serverip:6443 K3S_TOKEN=mytoken sh -
```

### Install {{prodname}}

Install {{prodname}} using one of the following methods: operator or manifest.

**Operator**

Install the {{prodname}} operator and custom resource definitions.

```bash
kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
```

:::note

Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.

:::

Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).

```bash
kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
```

:::note

Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example, you may need to change the default IP pool CIDR to match your pod network CIDR.

:::

**Manifest**

Install {{prodname}} by using the following command.

```bash
kubectl apply -f {{manifestsUrl}}/manifests/calico.yaml
```

:::note

You can also [view the YAML in a new tab]({{manifestsUrl}}/manifests/calico.yaml).

:::

You should see the following output.
- -``` - configmap/calico-config created - customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created - clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created - clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created - clusterrole.rbac.authorization.k8s.io/calico-node created - clusterrolebinding.rbac.authorization.k8s.io/calico-node created - daemonset.apps/calico-node created - serviceaccount/calico-node created - deployment.apps/calico-kube-controllers created - serviceaccount/calico-kube-controllers created -``` - - - - -### Check the installation - -1. Confirm that all of the pods are running using the following command. - - - - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -tigera-operator tigera-operator-c9cf5b94d-gj9qp 1/1 Running 0 107s -calico-system calico-typha-7dcd87597-npqsf 1/1 Running 0 88s -calico-system calico-node-rdwwz 1/1 Running 0 88s -kube-system local-path-provisioner-6d59f47c7-4q8l2 1/1 Running 0 2m14s -kube-system metrics-server-7566d596c8-xf66d 1/1 Running 0 2m14s -kube-system coredns-8655855d6-wfdbm 1/1 Running 0 2m14s -calico-system calico-kube-controllers-89df8c6f8-7hxc5 1/1 Running 0 87s -``` - - - - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system {{noderunning}}-9hn9z 1/1 Running 0 23m -kube-system local-path-provisioner-6d59f47c7-drznc 1/1 Running 0 38m -kube-system calico-kube-controllers-789f6df884-928lt 1/1 Running 0 23m -kube-system metrics-server-7566d596c8-qxlfz 1/1 Running 0 38m -kube-system coredns-8655855d6-blzl5 1/1 Running 0 38m -``` - - - - -1. Confirm that you now have two nodes in your cluster with the following command. - - ```bash - kubectl get nodes -o wide - ``` - - It should return something like the following. - - ``` - NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME - k3s-master Ready master 40m v1.18.2+k3s1 172.16.2.128 Ubuntu 18.04.3 LTS 4.15.0-101-generic containerd://1.3.3-k3s2 - k3s-node1 Ready 30m v1.18.2+k3s1 172.16.2.129 Ubuntu 18.04.3 LTS 4.15.0-101-generic containerd://1.3.3-k3s2 - ``` - -Congratulations! You now have a multi node K3s cluster -equipped with {{prodname}} and Traefik. 
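Before moving on, you can optionally verify cross-node pod networking with a quick ping test, a minimal sketch along the lines of the hard-way networking test earlier in these docs (the deployment name is just an example):

```bash
# Create two busybox pods; with more than one node they are usually
# scheduled onto different nodes.
kubectl create deployment pingtest --image=busybox --replicas=2 -- sleep infinity

# Note each pod's IP address and node.
kubectl get pods -l app=pingtest -o wide

# From one pod, ping the other pod's IP address
# (substitute an IP from the output above).
kubectl exec -it deploy/pingtest -- ping -c 4 <other-pod-ip>

# Clean up when done.
kubectl delete deployment pingtest
```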
## Next steps

- Try running the [Kubernetes Network policy demo](../../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) to see a live graphical view of network policy in action.

diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx
deleted file mode 100644
index cca3c4bf10..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/k3s/quickstart.mdx
+++ /dev/null
@@ -1,201 +0,0 @@
---
description: Install Calico on a single-node K3s cluster for testing or development in under 5 minutes.
---

# Quickstart for Calico on K3s

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

## Big picture

This quickstart gets you a single-node K3s cluster with {{prodname}} in approximately 5 minutes. You can use this cluster for testing and development.

## Value

Use this quickstart to quickly and easily try {{prodname}} features. To deploy a cluster suitable for production, refer to [Multi-node install](multi-node-install.mdx).

The geeky details of what you get:

## Before you begin

- Make sure you have a Linux host that meets the following requirements:
  - x86-64 processor
  - 1 CPU
  - 1GB RAM
  - 10GB free disk space
  - Ubuntu 18.04 (amd64), Ubuntu 20.04 (amd64)

:::note

K3s also supports ARM processors; this quickstart was tested in an x86-64 processor environment. For more detail, please visit [this link](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#operating-systems).

:::

## How to

### Create a single-node K3s cluster

- Initialize the control plane using the following command:

```bash
curl -sfL https://get.k3s.io | K3S_KUBECONFIG_MODE="644" INSTALL_K3S_EXEC="--flannel-backend=none --cluster-cidr=192.168.0.0/16 --disable-network-policy --disable=traefik" sh -
```

:::note

- If 192.168.0.0/16 is already in use within your network, you must select a different pod network CIDR by replacing 192.168.0.0/16 in the above command.

- The K3s installer generates the `kubeconfig` file under `/etc/rancher/k3s` with limited permissions; the `K3S_KUBECONFIG_MODE` environment variable assigns the necessary permissions to the file and makes it accessible to other users.

:::

### Install {{prodname}}

Install {{prodname}} using one of the following methods: operator or manifest.

**Operator**

1. Install the {{prodname}} operator and custom resource definitions.

```bash
kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
```

:::note

Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Therefore, it is recommended to use `kubectl create` or `kubectl replace`.

:::

2. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).

```bash
kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml
```

:::note

Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example, you may need to change the default IP pool CIDR to match your pod network CIDR.

:::

**Manifest**

Install {{prodname}} by using the following command.

```bash
kubectl apply -f {{manifestsUrl}}/manifests/calico.yaml
```

:::note

You can also [view the YAML in a new tab]({{manifestsUrl}}/manifests/calico.yaml).

:::

You should see the following output.
- -``` - configmap/calico-config created - customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created - customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created - clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created - clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created - clusterrole.rbac.authorization.k8s.io/calico-node created - clusterrolebinding.rbac.authorization.k8s.io/calico-node created - daemonset.apps/calico-node created - serviceaccount/calico-node created - deployment.apps/calico-kube-controllers created - serviceaccount/calico-kube-controllers created -``` - - - - -### Final checks - -1. Confirm that all of the pods are running using the following command. - -```bash -watch kubectl get pods --all-namespaces -``` - -2. Wait until each pod shows the `STATUS` of `Running`. - - - - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -tigera-operator tigera-operator-c9cf5b94d-gj9qp 1/1 Running 0 107s -calico-system calico-typha-7dcd87597-npqsf 1/1 Running 0 88s -calico-system calico-node-rdwwz 1/1 Running 0 88s -kube-system local-path-provisioner-6d59f47c7-4q8l2 1/1 Running 0 2m14s -kube-system metrics-server-7566d596c8-xf66d 1/1 Running 0 2m14s -kube-system coredns-8655855d6-wfdbm 1/1 Running 0 2m14s -calico-system calico-kube-controllers-89df8c6f8-7hxc5 1/1 Running 0 87s -``` - - - - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system {{noderunning}}-9hn9z 1/1 Running 0 23m -kube-system local-path-provisioner-6d59f47c7-drznc 1/1 Running 0 38m -kube-system calico-kube-controllers-789f6df884-928lt 1/1 Running 0 23m -kube-system metrics-server-7566d596c8-qxlfz 1/1 Running 0 38m -kube-system coredns-8655855d6-blzl5 1/1 Running 0 38m -``` - - - - -3. Press CTRL+C to exit `watch`. - -4. Confirm that you now have a node in your cluster with the - following command. - -```bash -kubectl get nodes -o wide -``` - -It should return something like the following. - -``` -NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME -k3s-master Ready master 40m v1.18.2+k3s1 172.16.2.128 Ubuntu 18.04.3 LTS 4.15.0-101-generic containerd://1.3.3-k3s2 -``` - -Congratulations! You now have a single-node K3s cluster -equipped with {{prodname}}. 
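If you want a minimal end-to-end check that policy enforcement is working before trying the full tutorials below, here is a condensed sketch of the simple policy tutorial (the deployment and pod names are illustrative):

```bash
# Run a test server and expose it as a service.
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80

# This request should succeed: no policy is in place yet.
kubectl run --rm -it access --image=busybox -- wget -q --timeout=5 nginx -O -

# Apply a default-deny ingress policy to the default namespace.
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny
  namespace: default
spec:
  podSelector: {}
  policyTypes:
    - Ingress
EOF

# This request should now time out, because Calico enforces the policy.
kubectl run --rm -it access --image=busybox -- wget -q --timeout=5 nginx -O -

# Clean up.
kubectl delete networkpolicy default-deny
kubectl delete service,deployment nginx
```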
- -## Next steps - -- Try running the [Kubernetes Network policy demo](../../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) to see live graphical view of network policy in action diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx deleted file mode 100644 index f87505064e..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/aks.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -description: Enable Calico network policy in AKS. ---- - -# Microsoft Azure Kubernetes Service (AKS) - -## Big picture - -Enable {{prodname}} in AKS managed Kubernetes service. - -## Value - -AKS has built-in support for {{prodname}}, providing a robust implementation of the full Kubernetes Network Policy API. AKS users wanting to go beyond Kubernetes network policy capabilities can make full use of the {{prodname}} Network Policy API. - -You can also use {{prodname}} for networking on AKS in place of the default Azure VPC networking. This allows you to take advantage of the full set of {{prodname}} networking features. - -## How to - -### Install AKS with {{prodname}} for network policy - -The geeky details of what you get: - - - -To enable {{prodname}} network policy enforcement, follow these step-by-step instructions: [Create an AKS cluster and enable network policy](https://docs.microsoft.com/en-us/azure/aks/use-network-policies). - -### Install AKS with {{prodname}} networking - -**Limitations** - -- [Windows dataplane](../windows-calico/index.mdx) is not supported. -- [eBPF dataplane](../../../operations/ebpf/use-cases-ebpf.mdx) is not supported. -- [VPP dataplane](https://github.com/projectcalico/vpp-dataplane) is not supported. - -The geeky details of what you get: - - - -1. Create an Azure AKS cluster with no Kubernetes CNI pre-installed. Please refer to [Bring your own CNI with AKS](https://docs.microsoft.com/en-us/azure/aks/use-byo-cni?tabs=azure-cli) for details. - - ``` - # Create a resource group - az group create --name my-calico-rg --location westcentralus - - az aks create --resource-group my-calico-rg --name my-calico-cluster --location westcentralus --pod-cidr 192.168.0.0/16 --network-plugin none - ``` - -1. Get credentials to allow you to access the cluster with `kubectl`: - - ``` - az aks get-credentials --resource-group my-calico-rg --name my-calico-cluster - ``` - -1. Now that you have a cluster configured, you can install {{prodname}}. - -1. Install the operator. - - ```bash - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - -1. Configure the {{prodname}} installation. - - ```bash - kubectl create -f - < - - -1. First, create an Amazon EKS cluster. - - ```bash - eksctl create cluster --name - ``` - -1. Install the operator. - - ```bash - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - -1. Configure the {{prodname}} installation. - - ```bash - kubectl create -f - < Ready master 52m v1.12.2 10.128.0.28 Ubuntu 18.04.1 LTS 4.15.0-1023-gcp docker://18.6.1 - ``` - -### Install EKS with {{prodname}} networking - -The geeky details of what you get: - - - -:::note - -{{prodname}} networking cannot currently be installed on the EKS control plane nodes. As a result the control plane nodes -will not be able to initiate network connections to {{prodname}} pods. 
(This is a general limitation of EKS's custom networking support, -not specific to {{prodname}}.) As a workaround, trusted pods that require control plane nodes to connect to them, such as those implementing -admission controller webhooks, can include `hostNetwork:true` in their pod spec. See the Kubernetes API -[pod spec](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#PodSpec) -definition for more information on this setting. - -::: - -For these instructions, we will use `eksctl` to provision the cluster. However, you can use any of the methods in [Getting Started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html) - -Before you get started, make sure you have downloaded and configured the [necessary prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html#eksctl-prereqs) - -1. First, create an Amazon EKS cluster without any nodes. - - ```bash - eksctl create cluster --name my-calico-cluster --without-nodegroup - ``` - -1. Since this cluster will use {{prodname}} for networking, you must delete the `aws-node` daemon set to disable AWS VPC networking for pods. - - ```bash - kubectl delete daemonset -n kube-system aws-node - ``` - -1. Now that you have a cluster configured, you can install {{prodname}}. - - - - -1. Install the operator. - - ```bash - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - -1. Configure the {{prodname}} installation. - - ```bash - kubectl create -f - < - - -1. Install the {{prodname}} manifest. - - ```bash - kubectl apply -f {{manifestsUrl}}/manifests/calico-vxlan.yaml - ``` - -1. Configure {{prodname}} to disable AWS src/dst checks. - - ```bash - kubectl -n kube-system set env daemonset/calico-node FELIX_AWSSRCDSTCHECK=Disable - ``` - -1. Finally, add nodes to the cluster. - - ```bash - eksctl create nodegroup --cluster my-calico-cluster --node-type t3.medium --max-pods-per-node 100 - ``` - - - - -1. Add {{prodname}} into your Helm repository. - - ```batch - helm repo add projectcalico https://docs.tigera.io/calico/charts - ``` - -1. If {{prodname}} is already added, update it to get the latest released version. - - ```batch - helm repo update - ``` - -1. Install version {{releaseTitle}} of the {{prodname}} operator and custom resource definitions. - - ```batch - helm install calico projectcalico/tigera-operator --version {{releaseTitle}} - ``` - -1. Patch the CNI type with value `Calico`. - - ```batch - kubectl patch installation default --type='json' -p='[{"op": "replace", "path": "/spec/cni", "value": {"type":"Calico"} }]' - ``` - -1. Finally, add nodes to the cluster. - - ```batch - eksctl create nodegroup --cluster my-calico-cluster --node-type t3.medium --max-pods-per-node 100 - ``` - - - - -:::tip - - Without the `--max-pods-per-node` option above, EKS will limit the [number of pods based on node-type](https://github.com/awslabs/amazon-eks-ami/blob/main/nodeadm/internal/kubelet/eni-max-pods.txt). See `eksctl create nodegroup --help` for the full set of node group options. 
- -::: - -## Next steps - -**Required** - -- [Install calicoctl command line tool](../../../operations/calicoctl/install.mdx) - -**Recommended** - -- [Video: Everything you need to know about Kubernetes pod networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/) -- [Get started with Kubernetes network policy](../../../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx) -- [Get started with {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) -- [Enable default deny for Kubernetes pods](../../../network-policy/get-started/kubernetes-default-deny.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx deleted file mode 100644 index 13c4c3b9fe..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/gke.mdx +++ /dev/null @@ -1,38 +0,0 @@ ---- -description: Enable Calico network policy in GKE. ---- - -# Google Kubernetes Engine (GKE) - -## Big picture - -Enable {{prodname}} in GKE managed Kubernetes service. - -## Value - -GKE has built-in support for {{prodname}}, providing a robust implementation of the full Kubernetes Network Policy API. GKE users wanting to go beyond Kubernetes network policy capabilities can make full use of the {{prodname}} Network Policy API. - -## How to - -To enable {{prodname}} network policy enforcement, follow these step-by-step instructions: -[Enabling network policy enforcement](https://cloud.google.com/kubernetes-engine/docs/how-to/network-policy). - -The geeky details of what you get: - - - -## Next steps - -**Required** - -- [Install calicoctl command line tool](../../../operations/calicoctl/install.mdx) - -**Recommended** - -- [Video: Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/) -- [Get started with Kubernetes network policy](../../../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx) -- [Get started with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) -- [Enable default deny for Kubernetes pods](../../../network-policy/get-started/kubernetes-default-deny.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx deleted file mode 100644 index 6312df7eee..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/iks.mdx +++ /dev/null @@ -1,36 +0,0 @@ ---- -description: Use IKS with built-in support for Calico networking and network policy. ---- - -# IBM Cloud Kubernetes Service (IKS) - -## Big picture - -Enable {{prodname}} in IKS managed Kubernetes service. - -## Value - -IKS has built-in support for {{prodname}}, providing a robust implementation of the full Kubernetes Network Policy API. IKS users wanting to go beyond Kubernetes network policy capabilities can make full use of the {{prodname}} Network Policy API. In addition to using {{prodname}} to secure Kubernetes pods, IKS also uses {{prodname}} host endpoint capabilities to provide additional security for the nodes in your cluster. 
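As a concrete illustration of going beyond the Kubernetes API, the sketch below uses a {{prodname}} `NetworkPolicy` with an explicit `Deny` rule and an `order` field, neither of which exists in Kubernetes network policy (the policy name and CIDR here are illustrative, not IKS-specific):

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: deny-egress-to-link-local
  namespace: default
spec:
  # Lower order means this policy is evaluated before higher-order policies.
  order: 100
  selector: all()
  types:
    - Egress
  egress:
    # Explicitly deny egress to the link-local range...
    - action: Deny
      protocol: TCP
      destination:
        nets:
          - 169.254.0.0/16
    # ...and allow everything else.
    - action: Allow
```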
- -## How to - -{{prodname}} networking and network policy are automatically installed and configured in your [IBM Cloud Kubernetes Service](https://www.ibm.com/products/kubernetes-service/). Default policies are created to protect your Kubernetes cluster, with the option to create your own policies to protect specific services. - -The geeky details of what you get: - - - -## Next steps - -**Required** - -- [Install calicoctl command line tool](../../../operations/calicoctl/install.mdx) - -**Recommended** - -- [Controlling traffic with network policies for IKS](https://cloud.ibm.com/docs/containers?topic=containers-network_policies) -- [Get started with Kubernetes network policy](../../../network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx) -- [Get started with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx deleted file mode 100644 index 8b3bc22c42..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/managed-public-cloud/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Enable Calico on EKS, GKE, AKS, or IKS. -hide_table_of_contents: true ---- - -# Managed public cloud - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx deleted file mode 100644 index a1e453423c..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/microk8s.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -description: Install Calico on a single-host MicroK8s cluster for testing or development in under 5 minutes. ---- - -# Quickstart for Calico on MicroK8s - -## Big picture - -Install a single node MicroK8s cluster with {{prodname}} in approximately 5 minutes. - -## Value - -MicroK8s is a lightweight upstream Kubernetes distribution package to run as an immutable container. - -Use this quickstart to quickly and easily try {{prodname}} features with MicroK8s. - -## Before you begin - -- Make sure you have a linux host that meets the following requirements: - - 4GB RAM - - 20GB free disk space - - Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS (or another operating system that supports `snapd`) - -## How to - -1. Initialize the node using the following command. - - ``` - snap install microk8s --classic - ``` - - :::note - - You can check out other versions of Kubernetes MicroK8s implementation published in snap using `snap info microk8s` command. - - ::: - -1. Enable dns services. - - ``` - microk8s enable dns - ``` - -1. 
Check your cluster status - - ``` - microk8s kubectl get pods -A - ``` - - You should see a result similar to - - ``` - NAMESPACE NAME READY STATUS RESTARTS AGE - kube-system calico-node-b82zp 1/1 Running 0 64s - kube-system calico-kube-controllers-555fc8cc5c-b7cp6 1/1 Running 0 64s - kube-system coredns-588fd544bf-mbc7n 1/1 Running 0 39s - ``` - -The geeky details of what you get: - - - -## Next steps - -**Required** - -- [Install and configure calicoctl](../../operations/calicoctl/install.mdx) - -**Optional** - -- [Add another node to form a multi-node cluster](https://microk8s.io/docs/clustering) - -**Recommended tutorials** - -- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx) -- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx) -- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx deleted file mode 100644 index c8c49267b0..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/minikube.mdx +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: Enable Calico on a single/multi-node minikube cluster for testing or development in under 1 minute. ---- - -# Quickstart for Calico on minikube - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -This quickstart gets you a single-node minikube cluster with {{prodname}} -in approximately 1 minute. You can use this cluster for testing and -development. - -## Value - -Use this quickstart to quickly and easily try {{prodname}} features. - -## Before you begin - -- Install, but do not start, minikube. [How to install minikube](https://minikube.sigs.k8s.io/docs/start/#what-youll-need) -- Install kubectl.[How to install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install a minikube driver. For example Docker. A full List of available drivers can be [found here.](https://minikube.sigs.k8s.io/docs/drivers/) - -## How to - -### Create a single-node minikube cluster - - - - -Minikube offers a built-in {{prodname}} implementation, this is a quick way to checkout {{prodname}} features. - -:::note - -Enabling preinstalled {{prodname}} might be the quickest way for testing. However, if you like to checkout a more recent version or features of {{prodname}} you should consider using Manifest or Operator approach. - -::: - -```bash -minikube start --network-plugin=cni --cni=calico -``` - - - - -1. Start your minikube cluster with one control plane node using the following command. - -:::note - -If `192.168.0.0/16` is already in use within your network you must select a different pod network CIDR, by replacing `192.168.0.0/16` in the following command. - -::: - -```bash -minikube start --cni=false --network-plugin=cni --extra-config=kubeadm.pod-network-cidr=192.168.0.0/16 --subnet=172.16.0.0/24 -``` - -2. Install the Tigera {{prodname}} operator and custom resource definitions. - -```bash -kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml -``` - -:::note - -Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`. 
- -::: - -3. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx). - -:::note - -Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example, -if you have replaced `pod-network-cidr` you must change it in this file as well. - -::: - -```bash -kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml -``` - - - - -Start your minikube cluster with one control plane node using the following command: - -```bash -minikube start --network-plugin=cni -``` - -Install {{prodname}}. - -```bash -kubectl apply -f {{manifestsUrl}}/manifests/calico.yaml -``` - - - - -### Verify {{prodname}} installation - -Verify {{prodname}} installation in your cluster using the following command: - -```bash -watch kubectl get pods -l k8s-app=calico-node -A -``` - -You should see a result similar to the below. Note that the namespace might be different, depending on the method you followed. - -``` -NAMESPACE NAME READY STATUS RESTARTS AGE -kube-system calico-node-mlqvs 1/1 Running 0 5m18s -``` - -Use `ctrl+c` to break out of watch. - -Congratulations you now have a minikube cluster equipped with {{prodname}} - -### Add an additional worker node - -:::note - -This as an optional step, you can safely skip this step if you do not require an additional worker node. - -::: - -```bash -minikube node add -``` - -Verify nodes using the following command: - -```bash -kubectl get nodes -``` - -### Clean up - -Delete the cluster using the following command: - -```bash -minikube delete -``` - -## Next steps - -**Required** - -- [Install and configure calicoctl](../../operations/calicoctl/install.mdx) - -**Recommended tutorials** - -- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx) -- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx) -- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx deleted file mode 100644 index b6bf7d2303..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico on OpenShift for networking and network policy. -hide_table_of_contents: true ---- - -# OpenShift - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx deleted file mode 100644 index 219bbc3a25..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/openshift/installation.mdx +++ /dev/null @@ -1,187 +0,0 @@ ---- -description: Install Calico on an OpenShift 4 cluster. ---- - -# Install an OpenShift 4 cluster with Calico - -## Big picture - -Install an OpenShift 4 cluster with {{prodname}}. 
- -## Value - -Augments the applicable steps in the [OpenShift documentation](https://cloud.redhat.com/openshift/install) -to install {{prodname}}. - -## How to - -### Before you begin - -- Ensure that your environment meets the {{prodname}} [system requirements](requirements.mdx). - -- Ensure that you have a [RedHat account](https://cloud.redhat.com/). A RedHat account is required to get the pull secret necessary to provision an OpenShift cluster. Note that the OpenShift installer supports a subset of AWS regions. - -- If installing on AWS, ensure that you have: - - - Configured an AWS account appropriate for OpenShift 4 - - [Set up your AWS credentials](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html) - - Generated a local SSH private key and added it to your ssh-agent - -### Create a configuration file for the OpenShift installer - -First, create a staging directory for the installation. This directory will contain the configuration file, along with cluster state files, that OpenShift installer will create: - -``` -mkdir openshift-tigera-install && cd openshift-tigera-install -``` - -Now run OpenShift installer to create a default configuration file: - -``` -openshift-install create install-config -``` - -:::note - -Refer to the [OpenShift installer documentation](https://cloud.redhat.com/openshift/install) for more information -about the installer and any configuration changes required for your platform. - -::: - -Once the installer has finished, your staging directory will contain the configuration file `install-config.yaml`. - -### Update the configuration file to use {{prodname}} - -Override the OpenShift networking to use Calico and update the AWS instance types to meet the [system requirements](requirements.mdx): - -```bash -sed -i 's/\(OpenShiftSDN\|OVNKubernetes\)/Calico/' install-config.yaml -``` - -### Generate the install manifests - -Now generate the Kubernetes manifests using your configuration file: - -```bash -openshift-install create manifests -``` - - - -### Optionally provide additional configuration - -You may want to provide Calico with additional configuration at install-time. For example, BGP configuration or peers. -You can use a Kubernetes ConfigMap with your desired Calico resources to set configuration as part of the installation. -If you do not need to provide additional configuration, you can skip this section. - -To include [Calico resources](../../../reference/resources/index.mdx) during installation, edit `manifests/02-configmap-calico-resources.yaml` to add your own configuration. - -:::note - -If you have a directory with the Calico resources, you can create the file with the command: - -``` -oc create configmap -n tigera-operator calico-resources \ ---from-file= --dry-run -o yaml \ -manifests/02-configmap-calico-resources.yaml -``` - -With recent versions of oc it is necessary to have a kubeconfig configured or add `--server='127.0.0.1:443'` -even though it is not used. - -::: - -:::note - -If you have provided a `calico-resources` configmap and the tigera-operator pod fails to come up with `Init:CrashLoopBackOff`, -check the output of the init-container with `oc logs -n tigera-operator -l k8s-app=tigera-operator -c create-initial-resources`. - -::: - -### Create the cluster - -Start the cluster creation with the following command and wait for it to complete. 
- -```bash -openshift-install create cluster -``` - -Once the above command is complete, you can verify {{prodname}} is installed by verifying the components are available with the following command. - -``` -oc get tigerastatus -``` - -:::note - -To get more information, add `-o yaml` to the above command. - -::: - -### Optionally integrate with Operator Lifecycle Manager (OLM) - -In OpenShift Container Platform, the [Operator Lifecycle Manager](https://docs.openshift.com/container-platform/4.4/operators/understanding_olm/olm-understanding-olm.html#olm-overview_olm-understanding-olm) helps -cluster administrators manage the lifecycle of operators in their cluster. Managing the {{prodname}} -operator with OLM gives administrators a single place to manage operators. - -To register the running {{prodname}} operator with OLM, first you will need to create an OperatorGroup for the operator: - -```bash -oc apply -f - < - -## OpenShift requirements - -{{prodname}} supports the [OpenShift Container Platform](https://docs.openshift.com/). - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx deleted file mode 100644 index e3067b56a8..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/quickstart.mdx +++ /dev/null @@ -1,169 +0,0 @@ ---- -description: Install Calico on a single-host Kubernetes cluster for testing or development in under 15 minutes. ---- - -# Quickstart for Calico on Kubernetes - -## Big picture - -This quickstart gets you a single-host Kubernetes cluster with {{prodname}} in approximately 15 minutes. - -## Value - -Use this quickstart to quickly and easily try {{prodname}} features. To deploy a cluster suitable for production, refer to [{{prodname}} on Kubernetes](../kubernetes/index.mdx). - -## Before you begin - -**Required** - -- A Linux host that meets the following requirements: - - - x86-64, arm64, ppc64le, or s390x processor - - 2CPU - - 2GB RAM - - 10GB free disk space - - RedHat Enterprise Linux 7.x+, CentOS 7.x+, Ubuntu 18.04+, or Debian 9.x+ - -- {{prodname}} can manage `cali` and `tunl` interfaces on the host - - If NetworkManager is present on the host, see [Configure NetworkManager](../../operations/troubleshoot/troubleshooting.mdx#configure-networkmanager). - -## Concepts - -### Operator based installation - -This quickstart guide uses the Tigera operator to install {{prodname}}. The operator provides lifecycle management for Calico -exposed via the Kubernetes API defined as a custom resource definition. - -:::note - -It is also possible to install Calico without an operator using Kubernetes manifests directly. -For platforms and guides that do not use the Tigera operator, you may notice some differences in the steps and Kubernetes -resources compared to those presented in this guide. - -::: - -## How to - -The geeky details of what you get: - - - -### Create a single-host Kubernetes cluster - -1. [Follow the Kubernetes instructions to install kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) - - :::note - - After installing kubeadm, do not power down or restart - the host. Instead, continue directly to the next step. - - ::: - -1. As a regular user with sudo privileges, open a terminal on the host that you installed kubeadm on. - -1. Initialize the control plane using the following command. 
- - ``` - sudo kubeadm init --pod-network-cidr=192.168.0.0/16 - ``` - - :::note - - If 192.168.0.0/16 is already in use within your network you must select a different pod network - CIDR, replacing 192.168.0.0/16 in the above command. - - ::: - -1. Execute the following commands to configure kubectl (also returned by `kubeadm init`). - - ``` - mkdir -p $HOME/.kube - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - ``` - -### Install {{prodname}} - -1. Install the Tigera {{prodname}} operator and custom resource definitions. - - ``` - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - - :::note - - Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`. - - ::: - -1. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx). - - ``` - kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml - ``` - - :::note - - Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example, - you may need to change the default IP pool CIDR to match your pod network CIDR. - - ::: - -1. Confirm that all of the pods are running with the following command. - - ``` - watch kubectl get pods -n calico-system - ``` - - Wait until each pod has the `STATUS` of `Running`. - - :::note - - The Tigera operator installs resources in the `calico-system` namespace. Other install methods may use - the `kube-system` namespace instead. - - ::: - -1. Remove the taints on the control plane so that you can schedule pods on it. - - ```bash - kubectl taint nodes --all node-role.kubernetes.io/control-plane- - kubectl taint nodes --all node-role.kubernetes.io/master- - ``` - - It should return the following. - - ``` - node/ untainted - ``` - -1. Confirm that you now have a node in your cluster with the following command. - - ``` - kubectl get nodes -o wide - ``` - - It should return something like the following. - - ``` - NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME - Ready master 52m v1.12.2 10.128.0.28 Ubuntu 18.04.1 LTS 4.15.0-1023-gcp docker://18.6.1 - ``` - -Congratulations! You now have a single-host Kubernetes cluster with {{prodname}}. - -## Next steps - -**Required** - -- [Install and configure calicoctl](../../operations/calicoctl/install.mdx) - -**Recommended tutorials** - -- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx) -- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx) -- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx deleted file mode 100644 index 9a4bc35c62..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/rancher.mdx +++ /dev/null @@ -1,96 +0,0 @@ ---- -description: Install Calico on a Rancher Kubernetes Engine cluster. 
---- - -# Install Calico on a Rancher Kubernetes Engine cluster - -## Big picture - -Install {{prodname}} as the required CNI for networking and/or network policy on Rancher-deployed clusters. - -## Concepts - -{{prodname}} supports the Calico CNI with Calico network policy: - -The geeky details of what you get: - - - -## Before you begin - -**Required** - -- A compatible [Rancher Kubernetes Engine cluster](https://rancher.com/docs/rke/latest/en/) with version 1.3 - - - Configure your cluster with a [Cluster Config File](https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/options/#cluster-config-file) and specify [no network plugin](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins/) by setting `plugin: none` under `network` in your configuration file. - -- RKE cluster meets the [{{prodname}} requirements](requirements.mdx) - -- A `kubectl` environment with access to your cluster - - - Use [Rancher kubectl Shell](https://rancher.com/docs/rancher/v2.x/en/cluster-admin/cluster-access/kubectl/) for access - - Ensure you have the [Kubeconfig file that was generated when you created the cluster](https://rancher.com/docs/rke/latest/en/installation/#save-your-files). - -- If using a Kubeconfig file locally, [install and set up the Kubectl CLI tool](https://kubernetes.io/docs/tasks/tools/install-kubectl/). - -## How to - -- [Install {{prodname}}](#install-calico) - -### Install {{prodname}} - -1. Install the Tigera {{prodname}} operator and custom resource definitions. - - ``` - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - - :::note - - Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`. - - ::: - -1. Install {{prodname}} by creating the necessary custom resource. For more information on configuration options available in this manifest, see [the installation reference](../../reference/installation/api.mdx). - - ``` - kubectl create -f {{manifestsUrl}}/manifests/custom-resources.yaml - ``` - - :::note - - Before creating this manifest, read its contents and make sure its settings are correct for your environment. For example, - you may need to change the default IP pool CIDR to match your pod network CIDR. Rancher uses `10.42.0.0/16` by default. - - ::: - - :::note - - If you are installing {{prodname}} on Windows nodes in this cluster, please see the [{{prodnameWindows}} for RKE](windows-calico/kubernetes/rancher.mdx) installation instructions. - - ::: - -1. Confirm that all of the pods are running with the following command. - - ``` - watch kubectl get pods -n calico-system - ``` - - Wait until each pod has the `STATUS` of `Running`. - -Congratulations! 
You now have an RKE cluster running {{prodname}} - -## Next steps - -**Required** - -- [Install and configure calicoctl](../../operations/calicoctl/install.mdx) - -**Recommended tutorials** - -- [Secure a simple application using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx) -- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx) -- [Run a tutorial that shows blocked and allowed connections in real time](../../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx deleted file mode 100644 index 690cda00ec..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/requirements.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: Review requirements before installing Calico to ensure success. ---- - -# System requirements - - - -## Kubernetes requirements - -#### Supported versions - -We test {{prodname}} {{version}} against the following Kubernetes versions. Other versions may work, but we are not actively testing them. - -- v1.23 -- v1.24 -- v1.25 -- v1.26 -- v1.27 -- v1.28 - -Due to changes in the Kubernetes API, {{prodname}} {{version}} will not work -on Kubernetes v1.15 or below. v1.16-v1.18 may work, but they are no longer tested. -Newer versions may also work, but we recommend upgrading to a version of {{prodname}} -that is tested against the newer Kubernetes version. - -#### CNI plug-in enabled - -For Kubernetes 1.24 or later, {{prodname}} must be installed as a CNI plugin in the container runtime. - -This installation must use the Kubernetes default CNI configuration directory (`/etc/cni/net.d`) and binary directory (`/opt/cni/bin`). - -For Kubernetes 1.23 or earlier, the kubelet must be configured to use CNI networking by passing the `--network-plugin=cni` argument. -(On kubeadm, this is the default.) - -#### Other network providers - -Generally, you cannot use {{prodname}} together with another network provider. - -Notable exceptions include the following: - -* [flannel](flannel/index.mdx) -* Platform-specific CNIs, such as the [AWS VPC CNI](https://github.com/aws/amazon-vpc-cni-k8s/blob/master/README.md) and [Azure VNET CNI](https://github.com/Azure/azure-container-networking/blob/master/docs/cni.md) plugins. - -If you're working with a cluster that already uses another CNI, you cannot migrate to {{prodname}}. - -#### Supported kube-proxy modes - -{{prodname}} supports the following kube-proxy modes: - -- `iptables` (default) -- `ipvs` Requires Kubernetes ≥ v1.9.3. Refer to - [Use IPVS kube-proxy](../../networking/configuring/use-ipvs.mdx) for more details. - -#### IP pool configuration - -The IP range selected for pod IP addresses cannot overlap with any other -IP ranges in your network, including: - -- The Kubernetes service cluster IP range -- The range from which host IPs are allocated - -## Application layer policy requirements - -- [MutatingAdmissionWebhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#mutatingadmissionwebhook) enabled -- Istio [v1.9](https://istio.io/v1.9/) or [v1.10](https://archive.istio.io/v1.10/) - -Note that Kubernetes version 1.16+ requires Istio version 1.2 or greater. -Note that Istio version 1.9 requires Kubernetes version 1.17-1.20. 
-Note that Istio version 1.10 is supported on Kubernetes version 1.18-1.21, but has been tested on Kubernetes version 1.22.
-
-
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx
deleted file mode 100644
index 7b4ff81146..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/config-options.mdx
+++ /dev/null
@@ -1,355 +0,0 @@
----
-description: Optionally customize Calico prior to installation.
----
-
-# Customize Calico configuration
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Perform common customizations of a {{prodname}} installation.
-
-## Concepts
-
-### {{prodname}} operator
-
-{{prodname}} is installed by an operator, which manages the installation, upgrade, and general lifecycle of a {{prodname}} cluster. The operator is
-installed directly on the cluster as a Deployment, and is configured through one or more custom Kubernetes API resources.
-
-### {{prodname}} manifests
-
-{{prodname}} can also be installed using raw manifests as an alternative to the operator. The manifests contain the necessary resources for installing {{prodname}} on each node in your Kubernetes cluster. Using manifests is not recommended as they cannot automatically manage the lifecycle of {{prodname}} as the operator does. However, manifests may be useful for clusters that require highly specific modifications to the underlying Kubernetes resources.
-
-## How to
-
-
-
-
-### About customizing an operator install
-
-Operator installations read their configuration from a specific set of Kubernetes APIs. These APIs are installed on the cluster
-as part of `tigera-operator.yaml` in the `operator.tigera.io/v1` API group.
-
-- [Installation](../../../reference/installation/api.mdx#operator.tigera.io/v1.Installation): a singleton resource with name "default" that
-  configures common installation parameters for a {{prodname}} cluster.
-- [APIServer](../../../reference/installation/api.mdx#operator.tigera.io/v1.APIServer): a singleton resource with name "default" that
-  configures installation of the {{prodname}} API server extension.
-
-### Configure the pod IP range
-
-For many environments, {{prodname}} will auto-detect the correct pod IP range to use, or select an unused range on the cluster.
-
-You can select a specific pod IP range by modifying the `spec.calicoNetwork.ipPools` array in the Installation API resource.
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
-  name: default
-spec:
-  calicoNetwork:
-    ipPools:
-      - cidr: 198.51.100.0/24
-```
-
-:::note
-
-The `ipPools` array can take at most one IPv4 and one IPv6 CIDR, and only takes effect when installing {{prodname}} for the first
-time on a given cluster. To add additional pools, see [the IPPool API](../../../reference/resources/ippool.mdx).
-
-:::
-
-### Use VXLAN
-
-You can enable VXLAN in a cluster by setting the `encapsulation` option on your IPv4 pool. You can also disable BGP via the `spec.calicoNetwork.bgp` field.
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
-  name: default
-spec:
-  calicoNetwork:
-    bgp: Disabled
-    ipPools:
-      - cidr: 198.51.100.0/24
-        encapsulation: VXLAN
-```
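-
-As a concrete sketch of the notes above, the following applies a dual-stack `Installation` (the `ipPools` array holding one IPv4 and one IPv6 pool) before the first node starts. The CIDRs are illustrative documentation ranges, not a recommendation:
-
-```bash
-# Sketch: dual-stack pools with illustrative CIDRs (198.51.100.0/24, 2001:db8::/64).
-kubectl create -f - <<EOF
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
-  name: default
-spec:
-  calicoNetwork:
-    ipPools:
-      - cidr: 198.51.100.0/24
-      - cidr: 2001:db8::/64
-EOF
-```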
-
-
-We provide a number of manifests to make deployment of {{prodname}} easy. You can optionally
-modify the manifests before applying them. Or you can modify the manifest and reapply it to change
-settings as needed.
-
-### About customizing {{prodname}} manifests
-
-Each manifest contains all the necessary resources for installing {{prodname}}
-on each node in your Kubernetes cluster.
-
-It installs the following Kubernetes resources:
-
-- The `{{nodecontainer}}` container, installed on each host using a DaemonSet.
-- The {{prodname}} CNI binaries and network config, installed on each host using a DaemonSet.
-- `calico/kube-controllers`, run as a Deployment.
-- The `calico-etcd-secrets` secret, which optionally allows for providing etcd TLS assets.
-- The `calico-config` ConfigMap, which contains parameters for configuring the install.
-
-The sections that follow discuss the configurable parameters in greater depth.
-
-### Configure the pod IP range
-
-{{prodname}} IPAM assigns IP addresses from [IP pools](../../../reference/resources/ippool.mdx).
-
-To change the default IP range used for pods, modify the `CALICO_IPV4POOL_CIDR`
-section of the `calico.yaml` manifest. For more information, see
-[Configuring {{nodecontainer}}](../../../reference/configure-calico-node.mdx).
-
-## Configuring IP-in-IP
-
-By default, the manifests enable IP-in-IP encapsulation across subnets. Many users may
-want to disable IP-in-IP encapsulation, for example in the following circumstances:
-
-- Their cluster is [running in a properly configured AWS VPC](../../../reference/public-cloud/aws.mdx).
-- All their Kubernetes nodes are connected to the same layer 2 network.
-- They intend to use BGP peering to make their underlying infrastructure aware of
-  pod IP addresses.
-
-To disable IP-in-IP encapsulation, modify the `CALICO_IPV4POOL_IPIP` section of the
-manifest. For more information, see [Configuring {{nodecontainer}}](../../../reference/configure-calico-node.mdx).
-
-## Switching from IP-in-IP to VXLAN
-
-By default, the Calico manifests enable IP-in-IP encapsulation. If you are on a network that blocks IP-in-IP, such
-as Azure, you may wish to switch to [Calico's VXLAN encapsulation mode](../../../networking/configuring/vxlan-ipip.mdx).
-To do this at install time (so that Calico creates the default IP pool with VXLAN and no IP-in-IP configuration has to
-be undone):
-
-- Start with one of the [Calico for policy and networking](config-options.mdx) manifests.
-- Replace the environment variable name `CALICO_IPV4POOL_IPIP` with `CALICO_IPV4POOL_VXLAN`. Leave the value of the new variable as "Always".
-- Optionally (to save some resources if you're running a VXLAN-only cluster), completely disable Calico's BGP-based
-  networking:
-  - Replace `calico_backend: "bird"` with `calico_backend: "vxlan"`. This disables BIRD.
-  - Comment out the lines `- -bird-ready` and `- -bird-live` in the calico/node readiness/liveness checks (otherwise disabling BIRD will cause the
-    readiness/liveness check to fail on every node):
-
-```yaml
-livenessProbe:
-  exec:
-    command:
-      - /bin/calico-node
-      - -felix-live
-      - -bird-live
-readinessProbe:
-  exec:
-    command:
-      - /bin/calico-node
-      - -bird-ready
-      - -felix-ready
-```
-
-For more information on {{nodecontainer}}'s configuration variables, including additional VXLAN settings, see
-[Configuring {{nodecontainer}}](../../../reference/configure-calico-node.mdx).
-
-:::note
-
-The `CALICO_IPV4POOL_VXLAN` environment variable only takes effect when the first {{nodecontainer}} to start
-creates the default IP pool. It has no effect after the pool has already been created. To switch to VXLAN mode
-after installation time, use calicoctl to modify the [IPPool](../../../reference/resources/ippool.mdx) resource.
-
-:::
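-
-As a sketch of that post-install switch, using the pool name that the default manifests create (verify yours with `calicoctl get ippool` first):
-
-```bash
-# Switch the default pool from IP-in-IP to VXLAN after installation.
-calicoctl patch ippool default-ipv4-ippool \
-  -p '{"spec": {"ipipMode": "Never", "vxlanMode": "Always"}}'
-```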
-
-## Configuring etcd
-
-By default, these manifests do not configure secure access to etcd and assume an
-etcd proxy is running on each host. The following configuration options let you
-specify custom etcd cluster endpoints as well as TLS.
-
-The following table outlines the supported `ConfigMap` options for etcd:
-
-| Option         | Description | Default |
-| -------------- | ----------- | ------- |
-| etcd_endpoints | Comma-delimited list of etcd endpoints to connect to. | http://127.0.0.1:2379 |
-| etcd_ca        | The file containing the root certificate of the CA that issued the etcd server certificate. Configures `{{nodecontainer}}`, the CNI plugin, and the Kubernetes controllers to trust the signature on the certificates provided by the etcd server. | None |
-| etcd_key       | The file containing the private key matching the client certificate issued to `{{nodecontainer}}`, the CNI plugin, and the Kubernetes controllers. Enables these components to participate in mutual TLS authentication and identify themselves to the etcd server. | None |
-| etcd_cert      | The file containing the client certificate issued to `{{nodecontainer}}`, the CNI plugin, and the Kubernetes controllers. Enables these components to participate in mutual TLS authentication and identify themselves to the etcd server. | None |
-
-To use these manifests with a TLS-enabled etcd cluster, you must do the following:
-
-1. Download the {{version}} manifest that corresponds to your installation method.
-
-   **{{prodname}} for policy and networking**
-
-   ```bash
-   curl {{manifestsUrl}}/manifests/calico-etcd.yaml -O
-   ```
-
-   **{{prodname}} for policy and flannel for networking**
-
-   ```bash
-   curl {{manifestsUrl}}/manifests/canal.yaml -O
-   ```
-
-1. Within the `ConfigMap` section, uncomment the `etcd_ca`, `etcd_key`, and `etcd_cert`
-   lines so that they look as follows.
-
-   ```yaml
-   etcd_ca: '/calico-secrets/etcd-ca'
-   etcd_cert: '/calico-secrets/etcd-cert'
-   etcd_key: '/calico-secrets/etcd-key'
-   ```
-
-1. Ensure that you have three files, one containing the `etcd_ca` value, another containing
-   the `etcd_key` value, and a third containing the `etcd_cert` value.
-
-1. Use a command like the following to strip the newlines from the files and
-   base64-encode their contents.
-
-   ```bash
-   cat <file> | base64 -w 0
-   ```
-
-1. In the `Secret` named `calico-etcd-secrets`, uncomment `etcd_ca`, `etcd_key`, and `etcd_cert`
-   and paste in the appropriate base64-encoded values.
-
-   ```yaml
-   apiVersion: v1
-   kind: Secret
-   type: Opaque
-   metadata:
-     name: calico-etcd-secrets
-     namespace: kube-system
-   data:
-     # Populate the following files with etcd TLS configuration if desired, but leave blank if
-     # not using TLS for etcd.
-     # This self-hosted install expects three files with the following names. The values
-     # should be base64 encoded strings of the entire contents of each file.
-     etcd-key: LS0tLS1CRUdJTiB...VZBVEUgS0VZLS0tLS0=
-     etcd-cert: LS0tLS1...ElGSUNBVEUtLS0tLQ==
-     etcd-ca: LS0tLS1CRUdJTiBD...JRklDQVRFLS0tLS0=
-   ```
-
-1. Apply the manifest.
- - **{{prodname}} for policy and networking** - - ```bash - kubectl apply -f calico.yaml - ``` - - **{{prodname}} for policy and flannel for networking** - - ```bash - kubectl apply -f canal.yaml - ``` - -## Authorization options - -{{prodname}}'s manifests assign its components one of two service accounts. -Depending on your cluster's authorization mode, you'll want to back these -service accounts with the necessary permissions. - -## Other configuration options - -The following table outlines the remaining supported `ConfigMap` options. - -| Option | Description | Default | -| ------------------ | --------------------------------------------------------------------------------------- | ------- | -| calico_backend | The backend to use. | `bird` | -| cni_network_config | The CNI Network config to install on each node. Supports templating as described below. | - -## CNI network configuration template - -The `cni_network_config` configuration option supports the following template fields, which will -be filled in automatically by the `calico/cni` container: - -| Field | Substituted with | -| ----------------------------- | -------------------------------------------------------------------------------------------------------------------- | -| `__KUBERNETES_SERVICE_HOST__` | The Kubernetes service Cluster IP, e.g `10.0.0.1` | -| `__KUBERNETES_SERVICE_PORT__` | The Kubernetes service port, e.g., `443` | -| `__SERVICEACCOUNT_TOKEN__` | The service account token for the namespace, if one exists. | -| `__ETCD_ENDPOINTS__` | The etcd endpoints specified in `etcd_endpoints`. | -| `__KUBECONFIG_FILEPATH__` | The path to the automatically generated kubeconfig file in the same directory as the CNI network configuration file. | -| `__ETCD_KEY_FILE__` | The path to the etcd key file installed to the host. Empty if no key is present. | -| `__ETCD_CERT_FILE__` | The path to the etcd certificate file installed to the host, empty if no cert present. | -| `__ETCD_CA_CERT_FILE__` | The path to the etcd certificate authority file installed to the host. Empty if no certificate authority is present. | - -## About customizing application layer policy manifests - -Instead of installing from our pre-modified Istio manifests, you may wish to -customize your Istio install or use a different Istio version. This section -walks you through the necessary changes to a generic Istio install manifest to -allow application layer policy to operate. - -The standard Istio manifests for the sidecar injector include a ConfigMap that -contains the template used when adding pods to the cluster. The template adds an -init container and the Envoy sidecar. Application layer policy requires -an additional lightweight sidecar called Dikastes which receives {{prodname}} policy -from Felix and applies it to incoming connections and requests. - -If you haven't already done so, download an -[Istio release](https://github.com/istio/istio/releases) and untar it to a -working directory. - -Open the `install/kubernetes/istio-demo-auth.yaml` file in an -editor, and locate the `istio-sidecar-injector` ConfigMap. In the existing `istio-proxy` container, add a new `volumeMount`. - -```yaml -- mountPath: /var/run/dikastes - name: dikastes-sock -``` - -Add a new container to the template. 
- -```yaml - - name: dikastes - image: {{registry}}{{imageNames.calico/dikastes}}:{{releases.0.components.calico/dikastes.version}} - args: ["server", "-l", "/var/run/dikastes/dikastes.sock", "-d", "/var/run/felix/nodeagent/socket"] - securityContext: - allowPrivilegeEscalation: false - livenessProbe: - exec: - command: - - /healthz - - liveness - initialDelaySeconds: 3 - periodSeconds: 3 - readinessProbe: - exec: - command: - - /healthz - - readiness - initialDelaySeconds: 3 - periodSeconds: 3 - volumeMounts: - - mountPath: /var/run/dikastes - name: dikastes-sock - - mountPath: /var/run/felix - name: felix-sync -``` - -Add two new volumes. - -```yaml -- name: dikastes-sock - emptyDir: - medium: Memory -- name: felix-sync - csi: - driver: 'csi.tigera.io' -``` - -The volumes you added are used to create Unix domain sockets that allow -communication between Envoy and Dikastes and between Dikastes and -Felix. Once created, a Unix domain socket is an in-memory communications -channel. The volumes are not used for any kind of stateful storage on disk. - -Refer to the -[Calico ConfigMap manifest]({{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.4.2.yaml) for an -example with the above changes. - - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx deleted file mode 100644 index 74a3bde624..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install Calico for on-premises deployments to provide networking and network policy, in either overlay or non-overlay networking modes. -hide_table_of_contents: true ---- - -# Self-managed on-premises - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx deleted file mode 100644 index b81b533088..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-onprem/onpremises.mdx +++ /dev/null @@ -1,230 +0,0 @@ ---- -description: Install Calico networking and network policy for on-premises deployments. ---- - -# Install Calico networking and network policy for on-premises deployments - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Install {{prodname}} to provide both networking and network policy for self-managed on-premises deployments. - -## Value - -**{{prodname}} networking** and **network policy** are a powerful choice for a CaaS implementation. If you have the networking infrastructure and resources to manage Kubernetes on-premises, installing the full {{prodname}} product provides the most customization and control. - -## Concepts - -### {{prodname}} operator - -{{prodname}} is installed by an operator which manages the installation, upgrade, and general lifecycle of a {{prodname}} cluster. The operator is -installed directly on the cluster as a Deployment, and is configured through one or more custom Kubernetes API resources. - -### {{prodname}} manifests - -{{prodname}} can also be installed using raw manifests as an alternative to the operator. 
The manifests contain the necessary resources for installing {{prodname}} on each node in your Kubernetes cluster. Using manifests is not recommended as they cannot automatically manage the lifecycle of the {{prodname}} as the operator does. However, manifests may be useful for clusters that require highly specific modifications to the underlying Kubernetes resources. - -## Before you begin... - -- Ensure that your Kubernetes cluster meets [requirements](../requirements.mdx). - If you do not have a cluster, see [Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/). - -## How to - -- [Install Calico](#install-calico) - -### Install Calico - - - - -1. Install the operator on your cluster. - - ```bash - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - -1. Download the custom resources necessary to configure {{prodname}} - - ```bash - curl {{manifestsUrl}}/manifests/custom-resources.yaml -O - ``` - - If you wish to customize the {{prodname}} install, customize the downloaded custom-resources.yaml manifest locally. - -1. Create the manifest to install {{prodname}}. - - ```bash - kubectl create -f custom-resources.yaml - ``` - -1. Verify {{prodname}} installation in your cluster. - - ``` - watch kubectl get pods -n calico-system - ``` - - You should see a result similar to the below. - - ``` - NAMESPACE NAME READY STATUS RESTARTS AGE - kube-system calico-node-txngh 1/1 Running 0 54s - ``` - - - - - - -Based on your datastore and number of nodes, select a link below to install {{prodname}}. - -:::note - -The option, **Kubernetes API datastore, more than 50 nodes** provides scaling using [Typha daemon](../../../reference/typha/index.mdx). Typha is not included for etcd because etcd already handles many clients so using Typha is redundant and not recommended. - -::: - -- [Install Calico with Kubernetes API datastore, 50 nodes or less](#install-calico-with-kubernetes-api-datastore-50-nodes-or-less) -- [Install Calico with Kubernetes API datastore, more than 50 nodes](#install-calico-with-kubernetes-api-datastore-more-than-50-nodes) -- [Install Calico with etcd datastore](#install-calico-with-etcd-datastore) - -#### Install Calico with Kubernetes API datastore, 50 nodes or less - -1. Download the {{prodname}} networking manifest for the Kubernetes API datastore. - - ```bash - curl {{manifestsUrl}}/manifests/calico.yaml -O - ``` - -1. If you are using pod CIDR `192.168.0.0/16`, skip to the next step. - If you are using a different pod CIDR with kubeadm, no changes are required — Calico will automatically detect the CIDR based on the running configuration. - For other platforms, make sure you uncomment the CALICO_IPV4POOL_CIDR variable in the manifest and set it to the same value as your chosen pod CIDR. -1. Customize the manifest as necessary. -1. Apply the manifest using the following command. - - ```bash - kubectl apply -f calico.yaml - ``` - -The geeky details of what you get: - - - -#### Install Calico with Kubernetes API datastore, more than 50 nodes - -1. Download the {{prodname}} networking manifest for the Kubernetes API datastore. - - ```bash - curl {{manifestsUrl}}/manifests/calico-typha.yaml -o calico.yaml - ``` - -1. If you are using pod CIDR `192.168.0.0/16`, skip to the next step. - If you are using a different pod CIDR with kubeadm, no changes are required — Calico will automatically detect the CIDR based on the running configuration. 
-   For other platforms, make sure you uncomment the CALICO_IPV4POOL_CIDR variable in the manifest and set it to the same value as your chosen pod CIDR.
-1. Modify the replica count to the desired number in the `Deployment` named `calico-typha`.
-
-   ```yaml noValidation
-   apiVersion: apps/v1
-   kind: Deployment
-   metadata:
-     name: calico-typha
-     ...
-   spec:
-     ...
-     replicas:
-   ```
-
-   We recommend at least one replica for every 200 nodes, and no more than
-   20 replicas. In production, we recommend a minimum of three replicas to reduce
-   the impact of rolling upgrades and failures. The number of replicas should
-   always be less than the number of nodes, otherwise rolling upgrades will stall.
-   In addition, Typha only helps with scale if there are fewer Typha instances than
-   there are nodes.
-
-   :::note
-
-   If you set `typha_service_name` and set the Typha deployment replica
-   count to 0, Felix will not start.
-
-   :::
-
-1. Customize the manifest if desired.
-1. Apply the manifest.
-
-   ```bash
-   kubectl apply -f calico.yaml
-   ```
-
-The geeky details of what you get:
-
-
-
-#### Install Calico with etcd datastore
-
-:::note
-
-The **etcd** database is not recommended for new installs. However, it is an option if you are running {{prodname}} as the network plugin for both OpenStack and Kubernetes.
-
-:::
-
-1. Download the {{prodname}} networking manifest for etcd.
-
-   ```bash
-   curl {{manifestsUrl}}/manifests/calico-etcd.yaml -o calico.yaml
-   ```
-
-1. If you are using pod CIDR `192.168.0.0/16`, skip to the next step.
-   If you are using a different pod CIDR with kubeadm, no changes are required — Calico will automatically detect the CIDR based on the running configuration.
-   For other platforms, make sure you uncomment the CALICO_IPV4POOL_CIDR variable in the manifest and set it to the same value as your chosen pod CIDR.
-1. In the `ConfigMap` named `calico-config`, set the value of `etcd_endpoints` to the IP address and port of your etcd server.
-
-   :::note
-
-   You can specify more than one `etcd_endpoint` using commas as delimiters.
-
-   :::
-
-1. Customize the manifest if desired.
-1. Apply the manifest using the following command.
-
-   ```bash
-   kubectl apply -f calico.yaml
-   ```
-
-The geeky details of what you get:
-
-
-
-
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended - Networking**
-
-- If you are using the default BGP networking with full-mesh node-to-node peering with no encapsulation, go to [Configure BGP peering](../../../networking/configuring/bgp.mdx) to get traffic flowing between pods (a minimal peering sketch follows this list).
-- If you are unsure about networking options, or want to implement encapsulation (overlay networking), see [Determine best networking option](../../../networking/determine-best-networking.mdx).
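-
-For the BGP peering bullet above, a minimal global peering gives a flavor of what the linked guide covers in depth. This is a sketch only: the peer address and AS number are placeholders for your own infrastructure.
-
-```bash
-# Sketch: peer every node with a single top-of-rack router (placeholder values).
-calicoctl create -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: my-tor-peer
-spec:
-  peerIP: 192.0.2.1
-  asNumber: 64512
-EOF
-```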
- -**Recommended - Security** - -- [Secure Calico component communications](../../../network-policy/comms/crypto-auth.mdx) -- [Secure hosts by installing Calico on hosts](../../bare-metal/about.mdx) -- [Secure pods with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) -- If you are using {{prodname}} with Istio service mesh, get started here: [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx deleted file mode 100644 index 53f91bf754..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/aws.mdx +++ /dev/null @@ -1,115 +0,0 @@ ---- -description: Use Calico with a self-managed Kubernetes cluster in Amazon Web Services (AWS). ---- - -# Self-managed Kubernetes in Amazon Web Services (AWS) - -## Big picture - -Use {{prodname}} with a self-managed Kubernetes cluster in Amazon Web Services (AWS). - -## Value - -Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like EKS), gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability. - -## Concepts - -Kubernetes Operations (kops) is a cluster management tool that handles provisioning cluster VMs and installing Kubernetes. It has built-in support for using {{prodname}} as the Kubernetes networking provider. - -## Before you begin... - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [AWS CLI tools](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) - -:::note - -{{prodname}} makes use of the Kubernetes Container Storage Interface (CSI) to support various types of volumes. The necessary drivers required for CSI -to function correctly in AWS clusters using EBS volumes may no longer be present by default in clusters running Kubernetes 1.23. Please check the documentation for the installer -being used to ensure the necessary CSI drivers are installed. - -If using Kubernetes Operations (kops) as further down on this page please use the relevant linked kops documentation to ensure your cluster has the necessary configuration. - -::: - -## How to - -There are many ways to install and manage Kubernetes in AWS. Using Kubernetes Operations (kops) is a good default choice for most people, as it gives you access to all of {{prodname}}’s [flexible and powerful networking features](../../../networking/index.mdx). However, there are other options that may work better for your environment. - -- [Kubernetes Operations for Calico networking and network policy](#kubernetes-operations-for-calico-networking-and-network-policy) -- [Other options and tools](#other-options-and-tools) - -### Kubernetes Operations for Calico networking and network policy - -To use kops to create a cluster with {{prodname}} networking and network policy: - -1. [Install kops](https://kops.sigs.k8s.io/install/) on your workstation. -1. [Set up your environment for AWS](https://kops.sigs.k8s.io/getting_started/aws/) . -1. 
Be sure to [set up an S3 state store](https://kops.sigs.k8s.io/getting_started/aws/#cluster-state-storage) and export its name: - - ``` - export KOPS_STATE_STORE=s3://name-of-your-state-store-bucket - ``` - -1. [Verify CSI driver installation configuration as per your particular cluster and volumes](https://kops.sigs.k8s.io/addons/#self-managed-aws-ebs-csi-driver) -1. Configure kops to use {{prodname}} for networking. - The easiest way to do this is to pass `--networking calico` to kops when creating the cluster. For example: - - ``` - kops create cluster \ - --zones us-west-2a \ - --networking calico \ - name-of-your-cluster - ``` - - Or, you can add `calico` to your cluster config. Run kops edit cluster and set the following networking configuration. - - ```yaml - networking: - calico: {} - ``` - -The geeky details of what you get: - -{' '} - -You can further customize the {{prodname}} install with [options listed in the kops documentation](https://kops.sigs.k8s.io/networking/#calico-example-for-cni-and-network-policy). - -### Other options and tools - -#### Amazon VPC CNI plugin - -As an alternative to {{prodname}} for both networking and network policy, you can use Amazon’s VPC CNI plugin for networking, and {{prodname}} for network policy. The advantage of this approach is that pods are assigned IP addresses associated with Elastic Network Interfaces on worker nodes. The IPs come from the VPC network pool and therefore do not require NAT to access resources outside the Kubernetes cluster. - -Set your kops cluster configuration to: - -```yaml -networking: - amazonvpc: {} -``` - -Then install {{prodname}} for network policy only after the cluster is up and ready. - -The geeky details of what you get: - - - -#### Kubespray - -[Kubespray](https://kubespray.io/) is a tool for provisioning and managing Kubernetes clusters with support for multiple clouds including Amazon Web Services. {{prodname}} is the default networking provider, or you can set the `kube_network_plugin` variable to `calico`. See the [Kubespray docs](https://kubespray.io/#/?id=network-plugins) for more details. - -## Next steps - -**Required** - -- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx) - -**Recommended** - -- [Video: Everything you need to know about Kubernetes pod networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/) -- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx deleted file mode 100644 index d726c89186..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/azure.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -description: Use Calico with a self-managed Kubernetes cluster in Microsoft Azure. ---- - -# Self-managed Kubernetes in Microsoft Azure - -## Big picture - -Use {{prodname}} with a self-managed Kubernetes cluster in Microsoft Azure. - -## Value - -Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like AKS), gives you the most flexibility in configuring {{prodname}} and Kubernetes. 
{{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability. - -## Concepts - -**aks-engine** is an open-source tool for creating and managing Kubernetes clusters in Microsoft Azure. It is the core technology for Microsoft’s Azure Kubernetes Service (AKS), but allows you to manage the cluster yourself. - -## Before you begin... - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [Azure CLI tools](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) - -## How to - -There are many ways to install and manage Kubernetes in Azure. This guide shows how to use **aks-engine** to deploy a cluster with **Azure’s CNI plugin for networking** and **{{prodname}} for network policy enforcement**. The advantage of this approach is that pods are assigned IP addresses associated with Azure Network Interfaces on worker nodes. The IPs come from the VNET network pool and therefore do not require NAT to access resources outside the Kubernetes cluster. However, there are other options that may work better for your environment. - -- [aks-engine for Azure networking and Calico network policy](#aks-engine-for-azure-networking-and-calico-network-policy) -- [Other options and tools](#other-options-and-tools) - -### aks-engine for Azure networking and Calico network policy - -[Install aks-engine](https://github.com/Azure/aks-engine/blob/master/docs/tutorials/quickstart.md#install-aks-engine) on your workstation. - -Before deploying, customize your cluster definition to use {{prodname}} for network policy. Add or modify the `kubernetesConfig` section to include the following (see the [aks-engine documentation](https://github.com/Azure/aks-engine/blob/master/docs/topics/clusterdefinitions.md#kubernetesconfig) for other Kubernetes configuration settings). - -``` -"kubernetesConfig": { - "networkPlugin": "azure", - "networkPolicy": "calico" - } -``` - -Or, start with this [example cluster definition](https://github.com/Azure/aks-engine/blob/master/examples/networkpolicy/kubernetes-calico-azure.json) with these value already set, and customize to meet your needs. - -Then, [follow the aks-engine documentation to deploy your cluster](https://github.com/Azure/aks-engine/blob/master/docs/tutorials/quickstart.md#deploy), passing your cluster definition to `aks-engine deploy` via the `-m` flag. - -The geeky details of what you get: - -{' '} - -### Other options and tools - -#### {{prodname}} networking - -You can also deploy {{prodname}} for both networking and policy enforcement. In this mode, {{prodname}} uses a VXLAN-based overlay network that masks the IP addresses of the pods from the underlying Azure VNET. This can be useful in large deployments or when running multiple clusters and IP address space is a big concern. - -Unfortunately, aks-engine does not support this mode, so you must use a different tool chain to install and manage the cluster. Some options: - -- Use [Terraform](#terraform) to provision the Azure networks and VMs, then [kubeadm](#kubeadm) to install the Kubernetes cluster. -- Use [Kubespray](#kubespray) - -### Terraform - -Terraform is a tool for automating infrastructure provisioning using declarative configurations. You can also go as far as automating the install of Docker, kubeadm, and Kubernetes using Terraform “provisioners.” See the [Terraform documentation](https://www.terraform.io/docs/index.html) for more details. 
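-
-For a flavor of that approach, the following hypothetical sketch scaffolds a minimal Terraform configuration for an Azure resource group and previews it; the provider block, resource names, and location are illustrative only.
-
-```bash
-# Hypothetical sketch: write a minimal Terraform config, then preview the plan.
-cat > main.tf <<'EOF'
-provider "azurerm" {
-  features {}
-}
-
-resource "azurerm_resource_group" "example_k8s" {
-  name     = "example-k8s"
-  location = "westus2"
-}
-EOF
-terraform init && terraform plan
-```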
- -#### kubeadm - -[kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/) is a command line tool for bootstrapping a Kubernetes cluster on top of already-provisioned compute resources, like VMs in a cloud or bare metal hosts. Unlike aks-engine which handles provisioning cloud resources, installing Kubernetes, and installing {{prodname}}, kubeadm only handles the second step of installing Kubernetes. You should proceed to install {{prodname}} after completing kubeadm install. - -#### Kubespray - -[Kubespray](https://kubespray.io/) is a tool for provisioning and managing Kubernetes clusters with support for multiple clouds including Azure. {{prodname}} is the default networking provider, or you can set the `kube_network_plugin` variable to `calico`. See the [Kubespray docs](https://kubespray.io/#/?id=network-plugins) for more details. - -## Next steps - -**Required** - -- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx) - -**Recommended** - -- [Video: Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/) -- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx deleted file mode 100644 index e4ca6c6d78..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/do.mdx +++ /dev/null @@ -1,128 +0,0 @@ ---- -description: Use Calico with a self-managed Kubernetes cluster in DigitalOcean (DO). ---- - -# Self-managed Kubernetes in DigitalOcean (DO) - -## Big picture - -This tutorial creates a self-managed Kubernetes cluster (1 Master, 2 Worker nodes) using {{prodname}} networking in DigitalOcean. - -## Value - -Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like EKS), gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability. - -## Concepts - -Kubernetes Operations (kops) is a cluster management tool that handles provisioning cluster VMs and installing Kubernetes. It has built-in support for using {{prodname}} as the Kubernetes networking provider. - -:::note - -Kops support for DigitalOcean is currently in the early stages of development and subject to change. -More information can be viewed [at this link.](https://kops.sigs.k8s.io/getting_started/digitalocean/) - -::: - -## Before you begin... - -- Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -- Install [kops](https://kops.sigs.k8s.io/install/) - -## How to - -There are many ways to install and manage Kubernetes in DO. Using Kubernetes Operations (kops) is a good default choice for most people, as it gives you access to all of {{prodname}}’s [flexible and powerful networking features](../../../networking/index.mdx). However, there are other options that may work better for your environment. 
-
-The geeky details of what you get:
-
-
-
-### Generate your DigitalOcean API token
-
-kops needs an API token to perform CRUD (create, read, update, and delete) operations on resources in your DigitalOcean account.
-Use [this link](https://www.digitalocean.com/docs/apis-clis/api/create-personal-access-token/) to generate your API token and then export it as an environment variable.
-
-```bash
-export DIGITALOCEAN_ACCESS_TOKEN=<api-token>
-```
-
-### Create object storage
-
-DigitalOcean provides an S3-compatible object storage API, which kops uses to save your cluster state.
-Create a Space using [this link](https://www.digitalocean.com/docs/spaces/how-to/create/) and export its name.
-
-```bash
-export KOPS_STATE_STORE=do://<space-name>
-export S3_ENDPOINT=<space-name>
-```
-
-:::note
-
-Using the FQDN for `S3_ENDPOINT` causes an error.
-If your Space FQDN is `my-test-space.nyc3.digitaloceanspaces.com`, just export `my-test-space` as `<space-name>`.
-
-:::
-
-### Generate an API key for object storage
-
-Access to object storage requires an API key.
-Follow this [tutorial](https://www.digitalocean.com/docs/spaces/how-to/manage-access/) to generate your keys, then export them as environment variables.
-
-```bash
-export S3_ACCESS_KEY_ID=<access-key>
-export S3_SECRET_ACCESS_KEY=<secret-key>
-```
-
-### Enable kops alpha feature
-
-Enable alpha feature support using the `KOPS_FEATURE_FLAGS` environment variable.
-
-```bash
-export KOPS_FEATURE_FLAGS="AlphaAllowDO"
-```
-
-### Create your cluster
-
-kops supports various options that enable you to customize your cluster the way you like:
-
-1. Add Calico to your cluster using `--networking=calico`.
-1. kops requires an external DNS server to create a cluster; by adding the `.k8s.local` suffix to the `--name=` option,
-   you generate a [gossip](https://kops.sigs.k8s.io/gossip/) DNS and bypass this requirement.
-
-:::note
-
-You can view a complete list of options supported by kops
-[at this link](https://kops.sigs.k8s.io/cli/kops_create_cluster/#options).
-
-:::
-
-```bash
-kops create cluster --cloud=digitalocean --name=calico-demo.k8s.local \
-  --networking=calico --master-zones=nyc1 --zones=nyc1 \
-  --master-count=1 --api-loadbalancer-type=public \
-  --node-size=s-1vcpu-2gb --image=ubuntu-20-04-x64 --yes
-```
-
-You can further customize the {{prodname}} install with [options listed in the kops documentation](https://kops.sigs.k8s.io/networking/calico).
-
-## Cleanup
-
-If you wish to remove the resources created by this tutorial, run:
-
-```bash
-kops delete cluster calico-demo.k8s.local --yes
-```
-
-Use the DigitalOcean web UI to remove the API tokens and Space you created.
-
-## Next steps
-
-**Required**
-
-- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx)
-
-**Recommended**
-
-- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx
deleted file mode 100644
index adc34054a3..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/gce.mdx
+++ /dev/null
@@ -1,235 +0,0 @@
----
-description: Use Calico with a self-managed Kubernetes cluster in Google Compute Engine (GCE).
----
-
-# Self-managed Kubernetes in Google Compute Engine (GCE)
-
-## Big picture
-
-Use {{prodname}} with a self-managed Kubernetes cluster in Google Compute Engine (GCE).
- -## Value - -Managing your own Kubernetes cluster (as opposed to using a managed-Kubernetes service like GKE) gives you the most flexibility in configuring {{prodname}} and Kubernetes. {{prodname}} combines flexible networking capabilities with "run-anywhere" security enforcement to provide a solution with native Linux kernel performance and true cloud-native scalability. - -## Concepts - -**kubeadm** is a cluster management tool that is used to install Kubernetes. - -## Before you begin... - -[Install and configure the Google Cloud CLI tools](https://cloud.google.com/sdk/docs/quickstarts) - -## How to - -There are many ways to install and manage Kubernetes in GCE. Using kubeadm is a good default choice for most people, as it gives you access to all of {{prodname}}’s [flexible and powerful networking features](../../../networking/index.mdx). However, there are other options that may work better for your environment. - -- [kubeadm for Calico networking and network policy](#kubeadm-for-calico-networking-and-network-policy) -- [Other tools and options](#other-tools-and-options) - -### kubeadm for Calico networking and network policy - -#### Create cloud resources - -You will need at least one VM to serve as a control plane node and one or more worker nodes. (It is possible to have control plane nodes also act as workers. This is not recommended in most cases and not covered by this guide.) See [requirements](../requirements.mdx) for specific OS requirements for these VMs. - -The following worked example creates a single control node and three workers on a dedicated virtual private network (VPC). Adjust the example as needed for your requirements. Consider a dedicated infrastructure management tool like [Terraform](https://www.terraform.io/) for managing cloud resources. (This example is adapted from [Kubernetes the Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way/blob/master/docs/03-compute-resources.md).) - -1. Create the VPC. - - ```bash - gcloud compute networks create example-k8s --subnet-mode custom - ``` - -2. Create the k8s-nodes subnet in the example-k8s VPC network: - - ```bash - gcloud compute networks subnets create k8s-nodes \ - --network example-k8s \ - --range 10.240.0.0/24 - ``` - -3. Create a firewall rule that allows internal communication across TCP, UDP, ICMP and IP in IP (used for the Calico overlay): - - ```bash - gcloud compute firewall-rules create example-k8s-allow-internal \ - --allow tcp,udp,icmp,ipip \ - --network example-k8s \ - --source-ranges 10.240.0.0/24 - ``` - -4. Create a firewall rule that allows external SSH, ICMP, and HTTPS: - - ```bash - gcloud compute firewall-rules create example-k8s-allow-external \ - --allow tcp:22,tcp:6443,icmp \ - --network example-k8s \ - --source-ranges 0.0.0.0/0 - ``` - -5. Create the controller VM. - - ```bash - gcloud compute instances create controller \ - --async \ - --boot-disk-size 200GB \ - --can-ip-forward \ - --image-family ubuntu-2204-lts \ - --image-project ubuntu-os-cloud \ - --machine-type n1-standard-2 \ - --private-network-ip 10.240.0.11 \ - --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ - --subnet k8s-nodes \ - --zone us-central1-f \ - --tags example-k8s,controller - ``` - -6. Create three worker VMs. 
-
-   ```bash
-   for i in 0 1 2; do
-     gcloud compute instances create worker-${i} \
-       --async \
-       --boot-disk-size 200GB \
-       --can-ip-forward \
-       --image-family ubuntu-2204-lts \
-       --image-project ubuntu-os-cloud \
-       --machine-type n1-standard-2 \
-       --private-network-ip 10.240.0.2${i} \
-       --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \
-       --subnet k8s-nodes \
-       --zone us-central1-f \
-       --tags example-k8s,worker
-   done
-   ```
-7. Install Docker on the controller VM and each worker VM. On each VM run:
-
-   ```bash
-   sudo apt update
-   sudo apt install -y docker.io
-   sudo systemctl enable docker.service
-   sudo apt install -y apt-transport-https curl
-   ```
-
-#### Install Kubernetes and create the cluster
-
-1. Install `kubeadm`, `kubelet`, and `kubectl` on each worker node and the controller node (see [kubeadm docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#installing-kubeadm-kubelet-and-kubectl) for more details).
-
-   Connect to each node and run these commands:
-
-   ```bash
-   curl -fsSL https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add -
-   cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
-   deb https://apt.kubernetes.io/ kubernetes-xenial main
-   EOF
-   sudo apt-get update
-   sudo apt-get install -y kubelet kubeadm kubectl
-   ```
-
-2. On the controller node, initialize the control plane, setting the pod network CIDR that {{prodname}} will use.
-
-   ```bash
-   sudo kubeadm init --pod-network-cidr=192.168.0.0/16
-   ```
-
-3. On the controller node, configure kubectl for your user (these commands are also printed by `kubeadm init`).
-
-   ```bash
-   mkdir -p $HOME/.kube
-   sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
-   sudo chown $(id -u):$(id -g) $HOME/.kube/config
-   ```
-
-4. On each worker node, join the cluster using the `kubeadm join` command printed at the end of `kubeadm init`, for example:
-
-   ```bash
-   sudo kubeadm join <controller-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
-   ```
-
-5. Verify that all nodes have joined.
-
-   Run this command on the controller node.
-
-   ```bash
-   kubectl get nodes
-   ```
-
-   This should output something similar to:
-
-   ```
-   NAME         STATUS     ROLES    AGE     VERSION
-   controller   NotReady   master   5m49s   v1.17.2
-   worker-0     NotReady   <none>   3m38s   v1.17.2
-   worker-1     NotReady   <none>   3m7s    v1.17.2
-   worker-2     NotReady   <none>   5s      v1.17.2
-   ```
-
-#### Install {{prodname}}
-
-1. On the controller, install {{prodname}} using the operator:
-
-   ```bash
-   kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
-   ```
-
-2. Download the custom resources necessary to configure {{prodname}}.
-
-   ```bash
-   curl {{manifestsUrl}}/manifests/custom-resources.yaml -O
-   ```
-
-3. If you wish to customize the {{prodname}} install, customize the downloaded custom-resources.yaml manifest. Then create the manifest to install {{prodname}}.
-
-   ```bash
-   kubectl create -f custom-resources.yaml
-   ```
-
-The geeky details of what you get:
-
-{' '}
-
-### Other tools and options
-
-#### Terraform
-
-You may have noticed that the bulk of the above instructions are about provisioning the Google Cloud resources for the cluster and installing Kubernetes. Terraform is a tool for automating infrastructure provisioning using declarative configurations. You can also go as far as automating the install of Docker, kubeadm, and Kubernetes using Terraform “provisioners.” See the [Terraform documentation](https://www.terraform.io/docs/index.html) for more details.
-
-#### Kubespray
-
-[Kubespray](https://kubespray.io/) is a tool for provisioning and managing Kubernetes clusters with support for multiple clouds including Google Compute Engine. Calico is the default networking provider, or you can set the `kube_network_plugin` variable to `calico`. See the [Kubespray docs](https://kubespray.io/#/?id=network-plugins) for more details.
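-
-As a quick sketch (assuming Kubespray's sample inventory layout, which is an assumption about your checkout), you can confirm or set the plugin before running the playbook:
-
-```bash
-# Confirm the network plugin in the sample inventory; set it to calico if needed.
-grep -r "kube_network_plugin" inventory/sample/group_vars/
-# Expected line: kube_network_plugin: calico
-```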
- -## Next steps - -**Required** - -- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx) - -**Recommended** - -- [Video: Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/) -- [Try out {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx deleted file mode 100644 index 371a6dd742..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/self-managed-public-cloud/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Manage your own Kubernetes clusters in AWS, GCE, or Azure public clouds. -hide_table_of_contents: true ---- - -# Self-managed public cloud - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx deleted file mode 100644 index 5be5af49c6..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/getting-started.mdx +++ /dev/null @@ -1,423 +0,0 @@ ---- -description: Install Calico with the VPP dataplane on a Kubernetes cluster. ---- - -# Get started with VPP networking - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Install {{prodname}} and enable the beta release of the VPP dataplane. - -:::caution - -The VPP dataplane is in beta and should not be used in production clusters. It has had lots of testing and is pretty stable. However, chances are that some bugs are still lurking around (please report these on the [Calico Users slack](https://calicousers.slack.com/archives/C017220EXU1) or [GitHub](https://github.com/projectcalico/vpp-dataplane/issues)). In addition, it still does not support all the features of {{prodname}}. - -::: - -## Value - -The VPP dataplane mode has several advantages over standard Linux networking pipeline mode: - -- Scales to higher throughput, especially with WireGuard encryption enabled -- Further improves encryption performance with IPsec -- Native support for Kubernetes services without needing kube-proxy, which: - - Reduces first-packet latency for packets to services - - Preserves external client source IP addresses all the way to the pod - -The VPP dataplane is entirely compatible with the other {{prodname}} dataplanes, meaning you can have a cluster with VPP-enabled nodes along with regular nodes. This makes it possible to migrate a cluster from Linux or eBPF networking to VPP networking. - -In addition, the VPP dataplane offers some specific features for network-intensive applications, such as providing `memif` userspace packet interfaces to the pods (instead of regular Linux network devices), or exposing the VPP Host Stack to run optimized L4+ applications in the pods. - -Trying out the beta will give you a taste of these benefits and an opportunity to give feedback to the VPP dataplane team. - -## Concepts - -### VPP - -The Vector Packet Processor (VPP) is a high-performance, open-source userspace network dataplane written in C, developed under the [fd.io](https://fd.io) umbrella. 
It supports many standard networking features (L2 switching, L3 routing, NAT, encapsulations), and is easily extensible using plugins. The VPP dataplane uses plugins to efficiently implement Kubernetes services load balancing and {{prodname}} policies.
-
-### Operator based installation
-
-This guide uses the Tigera operator to install {{prodname}}. The operator provides lifecycle management for {{prodname}},
-exposed via the Kubernetes API as a custom resource definition. While it is also technically possible to install {{prodname}}
-and configure it for VPP using manifests directly, only operator based installations are supported at this stage.
-
-## How to
-
-This guide details three ways to install {{prodname}} with the VPP dataplane:
-
-- On a managed EKS cluster. This is the option that requires the least configuration
-- On a managed EKS cluster with the DPDK interface driver. This option is more complex to set up but provides better performance
-- On any Kubernetes cluster
-
-In all cases, here are the details of what you will get:
-
-
-
-
-## Install Calico with the VPP dataplane on an EKS cluster
-
-### Requirements
-
-For these instructions, we will use `eksctl` to provision the cluster. However, you can use any of the methods in [Getting Started with Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html).
-
-Before you get started, make sure you have downloaded and configured the [necessary prerequisites](https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html#eksctl-prereqs).
-
-### Provision the cluster
-
-1. First, create an Amazon EKS cluster without any nodes.
-
- ```bash
- eksctl create cluster --name my-calico-cluster --without-nodegroup
- ```
-
-1. Since this cluster will use {{prodname}} for networking, you must delete the `aws-node` DaemonSet to disable the default AWS VPC networking for the pods.
-
- ```bash
- kubectl delete daemonset -n kube-system aws-node
- ```
-
-### Install and configure Calico with the VPP dataplane
-
-1. Now that you have an empty cluster configured, you can install the Tigera operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-1. Then, you need to configure the {{prodname}} installation for the VPP dataplane. The yaml in the link below contains a minimal viable configuration for EKS. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
- :::note
-
- Before applying this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to specify the default IP pool CIDR to match your desired pod network CIDR.
-
- :::
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/calico/installation-eks.yaml
- ```
-
-1. Now it is time to install the VPP dataplane components.
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp-eks.yaml
- ```
-
-1. Finally, add nodes to the cluster.
-
- ```bash
- eksctl create nodegroup --cluster my-calico-cluster --node-type t3.medium --node-ami auto --max-pods-per-node 50
- ```
-
- :::tip
-
- The `--max-pods-per-node` option above ensures that EKS does not limit the [number of pods based on node-type](https://github.com/awslabs/amazon-eks-ami/blob/main/nodeadm/internal/kubelet/eni-max-pods.txt). For the full set of node group options, see `eksctl create nodegroup --help`.
-
- :::
-
-
-
-## Install Calico with the VPP dataplane on an EKS cluster with the DPDK driver
-
-### Requirements
-
-DPDK provides better performance compared to the standard install, but it requires some additional customizations (hugepages, for instance) in the EKS worker instances. We have a bash script, `init_eks.sh`, which takes care of applying the required customizations, and we make use of the `preBootstrapCommands` property of the `eksctl` [configuration file](https://eksctl.io/usage/schema) to execute the script during the worker node creation. These instructions require the latest version of `eksctl`.
-
-### Provision the cluster
-
-1. First, create an Amazon EKS cluster without any nodes.
-
- ```bash
- eksctl create cluster --name my-calico-cluster --without-nodegroup
- ```
-
-2. Since this cluster will use {{prodname}} for networking, you must delete the `aws-node` DaemonSet to disable the default AWS VPC networking for the pods.
-
- ```bash
- kubectl delete daemonset -n kube-system aws-node
- ```
-
-### Install and configure Calico with the VPP dataplane
-
-1. Now that you have an empty cluster configured, you can install the Tigera operator.
-
- ```bash
- kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml
- ```
-
- :::note
-
- Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`.
-
- :::
-
-2. Then, you need to configure the {{prodname}} installation for the VPP dataplane. The yaml in the link below contains a minimal viable configuration for EKS. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx).
-
- :::note
-
- Before applying this manifest, read its contents and make sure its settings are correct for your environment. For example,
- you may need to specify the default IP pool CIDR to match your desired pod network CIDR.
-
- :::
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/calico/installation-eks.yaml
- ```
-
-3. Now it is time to install the VPP dataplane components.
-
- ```bash
- kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp-eks-dpdk.yaml
- ```
-
-4. Finally, it is time to add nodes to the cluster. Since we need to customize the nodes for DPDK, we will use an `eksctl` config file with the `preBootstrapCommands` property to create the worker nodes. The following command will create a managed nodegroup with 2 t3.large worker nodes in the cluster:
-
- ```
- cat <
- ```
-
- For details on ssh access refer to [Amazon EC2 key pairs and Linux instances](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html).
-
-
-
-## Install Calico with the VPP dataplane on any Kubernetes cluster
-
-### Requirements
-
-The VPP dataplane has the following requirements:
-
-**Required**
-
-- A blank Kubernetes cluster, where no CNI was ever configured.
-- These [base requirements](../requirements.mdx), except those related to the management of `cali*`, `tunl*` and `vxlan.calico` interfaces. - - :::note - - If you are using `kubeadm` to create the cluster please make sure to specify the pod network CIDR using the `--pod-network-cidr` command-line argument, i.e., `sudo kubeadm init --pod-network-cidr=192.168.0.0/16`. If 192.168.0.0/16 is already in use within your network you must select a different pod network CIDR. - - ::: - -**Optional** -For some hardware, the following hugepages configuration may enable VPP to use more efficient drivers: - -- At least 512 x 2MB-hugepages are available (`grep HugePages_Free /proc/meminfo`) -- The `vfio-pci` (`vfio_pci` on centos) or `uio_pci_generic` kernel module is loaded. For example: - - ```bash - echo "vfio-pci" > /etc/modules-load.d/95-vpp.conf - modprobe vfio-pci - echo "vm.nr_hugepages = 512" >> /etc/sysctl.conf - sysctl -p - # restart kubelet to take the changes into account - # you may need to use a different command depending on how kubelet was installed - systemctl restart kubelet - ``` - -### Install Calico and configure it for VPP - -1. Start by installing the Tigera operator on your cluster. - - ```bash - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - - :::note - - Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`. - - ::: - -1. Then, you need to configure the {{prodname}} installation for the VPP dataplane. The yaml in the link below contains a minimal viable configuration for VPP. For more information on configuration options available in this manifest, see [the installation reference](../../../reference/installation/api.mdx). - - :::note - - Before applying this manifest, read its contents and make sure its settings are correct for your environment. For example, - you may need to specify the default IP pool CIDR to match your desired pod network CIDR. - - ::: - - ```bash - kubectl create -f https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/calico/installation-default.yaml - ``` - -### Install the VPP dataplane components - -Start by getting the appropriate yaml manifest for the VPP dataplane resources: - -```bash -# If you have configured hugepages on your machines -curl -o calico-vpp.yaml https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp.yaml -``` - -```bash -# If not, or if you're unsure -curl -o calico-vpp.yaml https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/generated/calico-vpp-nohuge.yaml -``` - -Then locate the `calico-vpp-config` ConfigMap in this yaml manifest and configure it as follows. - -**Required Configuration** - -- `CALICOVPP_INTERFACES` contains a dictionary with parameters specific to interfaces in calicovpp. The field `uplinkInterfaces` contains a list of interfaces and their configuration, with the first element being the primary interface, and the rest (if any) being the secondary host interfaces. - -```yaml -CALICOVPP_INTERFACES: |- - { - "uplinkInterfaces": [ { "interfaceName": "eth0" } ] - } -``` - -The name of the used interface must be the name of a Linux interface, up and configured with an address. The address configured on this interface **must** be the node address in Kubernetes (`kubectl get nodes -o wide`). 
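-
-As a quick sanity check before applying the manifest, you can confirm that the interface named in `uplinkInterfaces` really carries the node address. A minimal sketch, assuming the uplink is `eth0` as in the example above:
-
-```bash
-# Address currently configured on the uplink interface.
-ip -4 addr show dev eth0
-
-# INTERNAL-IP reported for this node; it must match the address above,
-# per the requirement described in this section.
-kubectl get nodes -o wide
-```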
- -**Configuration options** - -`CALICOVPP_INTERFACES` - -| Field | Description | Type | -| ---------------- | -------------------------------------------------------- | --------------------------------------------------- | -| maxPodIfSpec | spec containing max values for pod interfaces config | [InterfaceSpec](#InterfaceSpec) | -| defaultPodIfSpec | spec containing default values for pod interfaces config | [InterfaceSpec](#InterfaceSpec) | -| vppHostTapSpec | spec containing config for host tap interface in vpp | [InterfaceSpec](#InterfaceSpec) | -| uplinkInterfaces | list of host interfaces in vpp | List of [UplinkInterfaceSpec](#UplinkInterfaceSpec) | - -#### InterfaceSpec - -| Field | Description | Type | Default | -| ------ | ---------------------------------- | -------------------------------------------------- | --------------------------------- | -| rx | Number of RX queues | int | 1 | -| tx | Number of TX queues | int | 1 | -| rxqsz | RX queue size | int | 1024 | -| txqsz | TX queue size | int | 1024 | -| isl3 | Defines the interface mode (L2/L3) | boolean | true for tuntap ; false for memif | -| rxMode | RX mode | string among "interrupt", "adaptive", or "polling" | `adaptive` | - -#### UplinkInterfaceSpec - -| Field | Description | Type | Default | -| ------------- | -------------------------------------------------------------- | -------------------------------------------------- | ----------------------------- | -| rx | Number of RX queues | int | 1 | -| tx | Number of TX queues | int | 1 | -| rxqsz | RX queue size | int | 1024 | -| txqsz | TX queue size | int | 1024 | -| isl3 | Defines the interface mode (L2/L3) for drivers that support it | boolean | true | -| rxMode | RX mode | string among "interrupt", "adaptive", or "polling" | `adaptive` | -| InterfaceName | interface name | string | unset | -| vppDriver | driver to use in vpp | string | unset | -| newDriver | linux driver to use before passing the interface to VPP | string | unset | -| mtu | the interface's mtu | int | use the existing MTU in linux | - -- `service_prefix` is the Kubernetes service CIDR. You can retrieve it by running: - -```bash -kubectl cluster-info dump | grep -m 1 service-cluster-ip-range -``` - -If this command doesn't return anything, you can leave the default value of `10.96.0.0/12`. - -**Optional** - -- To configure how VPP drives the physical interface, use `vppDriver` field for `uplinkInterfaces` elements in `CALICOVPP_INTERFACES`. - -The supported values will depend on the interface type. Available values are: - -- `""` : will automatically select and try drivers based on interface type and available resources, starting with the fastest -- `af_xdp` : use an AF_XDP socket to drive the interface (requires kernel 5.4 or newer) -- `af_packet` : use an AF_PACKET socket to drive the interface (not optimized but works everywhere) -- `avf` : use the VPP native driver for Intel 700-Series and 800-Series interfaces (requires hugepages) -- `vmxnet3` : use the VPP native driver for VMware virtual interfaces (requires hugepages) -- `virtio` : use the VPP native driver for Virtio virtual interfaces (requires hugepages) -- `rdma` : use the VPP native driver for Mellanox CX-4 and CX-5 interfaces (requires hugepages) -- `dpdk` : use the DPDK interface drivers with VPP (requires hugepages, works with most interfaces) -- `none` : do not configure connectivity automatically. 
This can be used when [configuring the interface manually](../../../reference/vpp/uplink-configuration.mdx) - -**Legacy options** - -We maintain legacy support for the `CALICOVPP_INTERFACE` and `CALICOVPP_NATIVE_DRIVER` environment variables: - -`CALICOVPP_INTERFACE` -> `uplinkInterfaces[0].interfaceName` - -`CALICOVPP_NATIVE_DRIVER` -> `uplinkInterfaces[0].vppDriver` - -If `CALICOVPP_INTERFACES` is unspecified, `CALICOVPP_INTERFACE` is the primary interface to be used. -In that case, use `CALICOVPP_NATIVE_DRIVER` instead of `vppDriver`. - -So either patch `CALICOVPP_INTERFACES` with the suitable interface in `uplinkInterfaces`, or delete `CALICOVPP_INTERFACES` and use `CALICOVPP_INTERFACE` instead. - -**Example** - -```yaml noValidation -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: calico-vpp-dataplane -data: - service_prefix: 10.96.0.0/12 - vpp_dataplane_interface: eth1 - vpp_uplink_driver: "" - ... -``` - -### Apply the configuration - -To apply the configuration, run: - -```bash -kubectl create -f calico-vpp.yaml -``` - -This will install all the resources required by the VPP dataplane in your cluster. - - - - -## Next steps - -After installing {{prodname}} with the VPP dataplane, you can benefit from the features of the VPP dataplane, such as fast [IPsec](ipsec.mdx) or [Wireguard](../../../network-policy/encrypt-cluster-pod-traffic.mdx) encryption. - -**Tools** - -- [Install and configure calicoctl](../../../operations/calicoctl/install.mdx) to configure and monitor your cluster. - -**Security** - -- [Secure pods with {{prodname}} network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx deleted file mode 100644 index f2d1d775d5..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install the VPP userspace dataplane to unlock extra performance for your cluster! -hide_table_of_contents: true ---- - -# VPP dataplane tech preview - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx deleted file mode 100644 index faa1952743..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/ipsec.mdx +++ /dev/null @@ -1,62 +0,0 @@ ---- -description: Enable IPsec for faster encryption between nodes when using the VPP dataplane. ---- - -# IPsec configuration with VPP - -## Big picture - -Enable IPsec encryption for the traffic flowing between the nodes. - -## Value - -IPsec is the fastest option to encrypt the traffic between nodes. It enables blanket application traffic encryption with very little performance impact. - -## Before you begin... - -To enable IPsec encryption, you will need a Kubernetes cluster with: - -- the [VPP dataplane](getting-started.mdx) configured -- [IP-in-IP encapsulation](../../../networking/configuring/vxlan-ipip.mdx) configured between the nodes - -## How to - -- [Create the IKEv2 PSK](#create-the-ikev2-psk) -- [Configure the VPP dataplane](#configure-the-vpp-dataplane) - -### Create the IKEv2 PSK - -Create a Kubernetes secret that contains the PSK used for the IKEv2 exchange between the nodes. 
You can use the following command to generate a unique random PSK, or replace the part after `psk=` with a key of your choice.
-
-```bash
-kubectl -n calico-vpp-dataplane create secret generic calicovpp-ipsec-secret \
- --from-literal=psk="$(dd if=/dev/urandom bs=1 count=36 2>/dev/null | base64)"
-```
-
-### Configure the VPP dataplane
-
-To enable IPsec, you need to configure two environment variables on the `calico-vpp-node` pod. You can do so with the following kubectl command:
-
-```bash
-kubectl -n calico-vpp-dataplane patch daemonset calico-vpp-node --patch "$(curl https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/yaml/components/ipsec/ipsec.yaml)"
-```
-
-Once IPsec is enabled, all the traffic that uses IP-in-IP encapsulation in the cluster will be automatically encrypted.
-
-## Next steps
-
-### Verify encryption
-
-To verify that the traffic is encrypted, open a VPP debug CLI session and check the configuration with [calivppctl](../../../operations/troubleshoot/vpp.mdx):
-
-```bash
-calivppctl vppctl myk8node1
-```
-
-Then at the `vpp#` prompt, you can run the following commands:
-
-- `show ikev2 profile` will list the configured IKEv2 profiles; there should be one per other node in your cluster
-- `show ipsec sa` will list the established IPsec SAs, two per IKEv2 profile
-- `show interface` will list all the interfaces configured in VPP. The ipip interfaces (which correspond to the IPsec tunnels) should be up
-
-You can also [capture the traffic](../../../operations/troubleshoot/vpp.mdx#tracing-packets) flowing between the nodes to verify that it is encrypted.
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx
deleted file mode 100644
index de906b30b6..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/vpp/specifics.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-description: Behavioral discrepancies when running with the Calico/VPP dataplane
----
-
-# Details of VPP implementation & known issues
-
-Enabling VPP as the Calico dataplane should be transparent for most applications, but some specific behaviours might differ. This page gives a summary of the main differences, as well as the features that are still unsupported or have known issues.
-
-## Behavioural differences from other dataplanes
-
-The main difference between VPP and a regular iptables/IPVS dataplane is in the NodePorts implementation. Because the constraints differ, VPP can optimise the service implementation, but as a consequence, some behaviours differ. This mostly impacts policies that expect packets to have been source-NATed (or not).
-
-- For `ClusterIPs`, `ExternalIPs` and `LoadBalancerIPs`, load-balancing is done with the Maglev algorithm, and the packets are only NAT-ed on the node where the selected backend lives. This allows us to avoid source NAT-ing packets, and thus present the real client address to the destination pod. The same is true when a pod connects to a ClusterIP. This behavior allows the service load balancing to use direct service return (DSR) by default.
-
-- For `NodePorts`, packets are always NATed on the node targeted by the traffic. This is not the case for the eBPF dataplane, where all nodes will NAT traffic to a node port regardless of the destination IP. Traffic is also always source-NATed so that the return traffic comes back through the same node.
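-
-One way to observe the source-address behaviour described above is to query a service from a pod and check which client address the backend sees. The following is a minimal sketch, assuming the upstream `agnhost` test image (its `netexec` server exposes a `/clientip` endpoint); the pod and service names are illustrative:
-
-```bash
-# Backend that echoes back the client address it sees.
-kubectl run echo --image=registry.k8s.io/e2e-test-images/agnhost:2.39 -- netexec --http-port=8080
-kubectl expose pod echo --name=echo --port=80 --target-port=8080
-
-# Query the ClusterIP from another pod: with the VPP dataplane the reported
-# address should be the client pod's own IP, not a NATed node address.
-kubectl run client --rm -it --restart=Never --image=busybox:1.28 -- \
-  wget -qO- http://echo.default.svc.cluster.local/clientip
-```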
- -## Known issues & unsupported features - -Although we aim at being feature complete, as VPP is still in beta status, some features are still unsupported or have known issues : - -- For host endpoints policies, setting `doNotTrack` or `preDNAT` is not supported. - - Setting them to `true` will result in the policy being ignored, and an error message to be printed by the calico-vpp-agent -- VPP does not support running with `BGP disabled`. -- `Session affinity for services` is not supported -- `Wireguard` is supported when activated cluster wide at startup time. Enabling/disabling Wireguard on a running cluster with live pods is known to be unstable. -- `EndpointSlices` are not supported diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx deleted file mode 100644 index 361719619b..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/demo.mdx +++ /dev/null @@ -1,630 +0,0 @@ ---- -description: An interactive demo to show how to apply basic network policy to pods in a Calico for Windows cluster. ---- - -# Basic policy demo - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -This guide provides a simple demo to illustrate basic pod-to-pod connectivity and the application of network policy in a {{prodnameWindows}} cluster. We will create client and server pods on Linux and Windows nodes, verify connectivity between the pods, and then we'll apply a basic network policy to isolate pod traffic. - -## Prerequisites - -To run this demo, you will need a [{{prodnameWindows}} cluster](quickstart.mdx) with -Windows Server 1809 (build 17763.1432 August 2020 update or newer). More recent versions of Windows Server can be used with a change to the demo manifests. - -:::note - -Windows Server 1809 (build older than 17763.1432) do not support [direct server return](https://techcommunity.microsoft.com/t5/networking-blog/direct-server-return-dsr-in-a-nutshell/ba-p/693710). This means that policy support is limited to only pod IP addresses. - -::: - - - - -## Create pods on Linux nodes - -First, create a client (busybox) and server (nginx) pod on the Linux nodes: - -```bash -kubectl apply -f - < 80 -``` - -To combine both of the above steps: - -```bash -kubectl exec -n calico-demo busybox -- nc -vz $(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') 80 -``` - -If the connection from the busybox pod to the porter pod succeeds, we will get output similar to the following: - -``` -192.168.40.166 (192.168.40.166:80) open -``` - -Now let's verify that the powershell pod can reach the nginx pod: - -```bash -kubectl exec -n calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po nginx -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 -``` - -If the connection succeeds, we will get output similar to: - -``` -StatusCode : 200 -StatusDescription : OK -Content : - - - Welcome to nginx! - - <... -... -``` - -Finally, let's verify that the powershell pod can reach the porter pod: - -```bash -kubectl exec -n calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 -``` - -If that succeeds, we will see something like: - -``` -StatusCode : 200 -StatusDescription : OK -Content : This is a Calico for Windows demo. 
-RawContent : HTTP/1.1 200 OK - Content-Length: 49 - Content-Type: text/plain; charset=utf-8 - Date: Fri, 21 Aug 2020 22:45:46 GMT - - This is a Calico for Windows demo. -Forms : -Headers : {[Content-Length, 49], [Content-Type, text/plain; - charset=utf-8], [Date, Fri, 21 Aug 2020 22:45:46 GMT]} -Images : {} -InputFields : {} -Links : {} -ParsedHtml : -RawContentLength : 49 -``` - -## Apply policy to the Windows client pod - -Now let's apply a basic network policy that allows only the busybox pod to reach the porter pod. - -```bash -calicoctl apply -f - < - - -## Installing kubectl on Windows - -To run the commands in this demo you need the Windows version of kubectl installed and add it to the system path. -[Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/) and move the kubectl binary to **c:\k**. - -Add `c:\k` to the system path - -1. Open a PowerShell window as Administrator - - ```powershell - $env:Path += ";C:\k" - ``` - -1. Close all PowerShell windows. - -## Create pods on Linux nodes - -First, create a client (busybox) and server (nginx) pod on the Linux nodes. - -### Create a YAML file policy-demo-linux.yaml using your favorite editor on Windows - -```yaml -apiVersion: v1 -kind: Namespace -metadata: - name: calico-demo - ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - app: busybox - name: busybox - namespace: calico-demo -spec: - containers: - - args: - - /bin/sh - - -c - - sleep 360000 - image: busybox:1.28 - imagePullPolicy: Always - name: busybox - nodeSelector: - kubernetes.io/os: linux - ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - app: nginx - name: nginx - namespace: calico-demo -spec: - containers: - - name: nginx - image: nginx:1.8 - ports: - - containerPort: 80 - nodeSelector: - kubernetes.io/os: linux -``` - -### Apply the policy-demo-linux.yaml file to the Kubernetes cluster - -1. Open a PowerShell window. -1. Use `kubectl` to apply the `policy-demo-linux.yaml` configuration. - -```powershell -kubectl apply -f policy-demo-linux.yaml -``` - -## Create pods on Window nodes - -Next, we’ll create a client (pwsh) and server (porter) pod on the Windows nodes. -:::note - -The pwsh and porter pod manifests below use images based on mcr.microsoft.com/windows/servercore:1809. If you are using a more recent Windows Server version, update the manifests to use a servercore image that matches your Windows Server version. - -::: - -### Create the policy-demo-windows.yaml using your favorite editor on Windows - -```yaml -apiVersion: v1 -kind: Pod -metadata: - name: pwsh - namespace: calico-demo - labels: - app: pwsh -spec: - containers: - - name: pwsh - image: mcr.microsoft.com/windows/servercore:1809 - args: - - powershell.exe - - -Command - - 'Start-Sleep 360000' - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/os: windows ---- -apiVersion: v1 -kind: Pod -metadata: - name: porter - namespace: calico-demo - labels: - app: porter -spec: - containers: - - name: porter - image: calico/porter:1809 - ports: - - containerPort: 80 - env: - - name: SERVE_PORT_80 - value: This is a Calico for Windows demo. - imagePullPolicy: IfNotPresent - nodeSelector: - kubernetes.io/os: windows -``` - -### Apply the policy-demo-windows.yaml file to the Kubernetes cluster - -1. Open a PowerShell window. -1. 
Use `kubectl` to apply the `policy-demo-windows.yaml` configuration - -```powershell -kubectl apply -f policy-demo-windows.yaml -``` - -### Verify four pods have been created and are running - -:::note - -Launching the Windows pods is going to take some time depending on your network download speed. - -::: - -1. Open a PowerShell window. -1. Using `kubectl` to list the pods in the `calico-demo` namespace. - -```powershell -kubectl get pods --namespace calico-demo -``` - -You should see something like the below - -```output -NAME READY STATUS RESTARTS AGE -busybox 1/1 Running 0 4m14s -nginx 1/1 Running 0 4m14s -porter 0/1 ContainerCreating 0 74s -pwsh 0/1 ContainerCreating 0 2m9s -``` - -Repeat the command every few minutes until the output shows all 4 pods in the Running state. - -```output -NAME READY STATUS RESTARTS AGE -busybox 1/1 Running 0 7m24s -nginx 1/1 Running 0 7m24s -porter 1/1 Running 0 4m24s -pwsh 1/1 Running 0 5m19s -``` - -### Check connectivity between pods on Linux and Windows nodes - -Now that client and server pods are running on both Linux and Windows nodes, let’s verify that client pods on Linux nodes can reach server pods on Windows nodes. - -1. Open a PowerShell window. -1. Using `kubectl` to determine the porter pod IP address: - - ```powershell - kubectl get pod porter --namespace calico-demo -o 'jsonpath={.status.podIP}' - ``` - -1. Log into the busybox pod and try reaching the porter pod on port 80. Replace the `` tag with the IP address returned from the previous command. - - ```powershell - kubectl exec --namespace calico-demo busybox -- nc -vz 80 - ``` - - :::note - - You can also combine both of the above steps: - - ::: - - ```powershell - kubectl exec --namespace calico-demo busybox -- nc -vz $(kubectl get pod porter --namespace calico-demo -o 'jsonpath={.status.podIP}') 80 - ``` - - If the connection from the busybox pod to the porter pod succeeds, you will get output similar to the following: - - ```powershell - 192.168.40.166 (192.168.40.166:80) open - ``` - - :::note - - The IP addresses returned will vary depending on your environment setup. - - ::: - -1. Now you can verify that the pwsh pod can reach the nginx pod: - - ```powershell - kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po nginx -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 - ``` - - If the connection succeeds, you will see output similar to: - - ``` - StatusCode : 200 - StatusDescription : OK - Content : - - - Welcome to nginx! - - <... - ``` - -1. Verify that the pwsh pod can reach the porter pod: - - ```powershell - kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 - ``` - - If that succeeds, you will see something like: - - ``` - StatusCode : 200 - StatusDescription : OK - Content : This is a Calico for Windows demo. - RawContent : HTTP/1.1 200 OK - Content-Length: 49 - Content-Type: text/plain; charset=utf-8 - Date: Fri, 21 Aug 2020 22:45:46 GMT - - This is a Calico for Windows demo. - Forms : - Headers : {[Content-Length, 49], [Content-Type, text/plain; - charset=utf-8], [Date, Fri, 21 Aug 2020 22:45:46 GMT]} - Images : {} - InputFields : {} - Links : {} - ParsedHtml : - RawContentLength : 49 - - ``` - -You have now verified that communication is possible between all pods in the application. 
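-
-Before moving on to policy, it can help to capture each pod's IP and node placement in a single view, so you can tell which connections cross the Linux/Windows boundary. The command below works the same from PowerShell:
-
-```bash
-# Pod IPs and the nodes they were scheduled to.
-kubectl get pods --namespace calico-demo -o wide
-```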
- -## Apply policy to the Windows client pod - -In a real world deployment you would want to make sure only pods that are supposed to communicate with each other, are actually allowed to do so. - -To achieve this you will apply a basic network policy which allows only the busybox pod to reach the porter pod. - -### Create the network-policy.yaml file using your favorite editor on Windows - -```yaml -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: allow-busybox - namespace: calico-demo -spec: - podSelector: - matchLabels: - app: porter - policyTypes: - - Ingress - ingress: - - from: - - podSelector: - matchLabels: - app: busybox - ports: - - protocol: TCP - port: 80 -``` - -### Apply the network-policy.yaml file - -1. Open a PowerShell window. -1. Use `kubectl` to apply the network-policy.yaml file. - -```powershell -kubectl apply -f network-policy.yaml -``` - -### Verify the policy is in effect - -With the policy in place, the busybox pod should still be able to reach the porter pod: -:::note - -We will be using the combined command line from earlier in this chapter. - -::: - -```powershell -kubectl exec --namespace calico-demo busybox -- nc -vz $(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') 80 -``` - -However, the pwsh pod will not able to reach the porter pod: - -```powershell -kubectl exec --namespace calico-demo pwsh -- powershell Invoke-WebRequest -Uri http://$(kubectl get po porter -n calico-demo -o 'jsonpath={.status.podIP}') -UseBasicParsing -TimeoutSec 5 -``` - -The request times out with a message like the below: - -```powershell -Invoke-WebRequest : The operation has timed out. -At line:1 char:1 -+ Invoke-WebRequest -Uri http://192.168.40.166 -UseBasicParsing -Timeout ... -+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - + CategoryInfo : InvalidOperation: (System.Net.HttpWebRequest:Htt -pWebRequest) [Invoke-WebRequest], WebException - + FullyQualifiedErrorId : WebCmdletWebResponseException,Microsoft.PowerShell.Commands.InvokeWebRequestCommand -command terminated with exit code 1 -``` - -## Wrap up - -In this demo we’ve configured pods on Linux and Windows nodes, verified basic pod connectivity, and tried a basic network policy to isolate pod to pod traffic. -As the final step you can clean up all of the demo resources: - -1. Open a PowerShell window. - -```powershell -kubectl delete namespace calico-demo -``` - - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx deleted file mode 100644 index 6e0421123d..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install and configure Calico for Windows. -hide_table_of_contents: true ---- - -# Calico for Windows - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx deleted file mode 100644 index 9a5e846458..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubeconfig.mdx +++ /dev/null @@ -1,115 +0,0 @@ ---- -description: Configure kubeconfig for Calico for Windows. 
---- - -# Create kubeconfig for Windows nodes - -## Big picture - -Create kubeconfig for Windows nodes for manual installations of {{prodnameWindows}}. - -## How to - -In a manual installation of {{prodnameWindows}}, {{prodname}} requires a kubeconfig file to access the API server. This section describes how to find an existing `calico-node` service account used by {{prodname}} on Linux side, and then to export the service account token as a kubeconfig file for {{prodname}} to use. - -:::note - -In general, the node kubeconfig as used by kubelet does not have enough permissions to access {{prodname}}-specific resources. - -::: - -### Export calico-node service account token as a kubeconfig file - -:::note - -If your Kubernetes version is v1.24.0 or higher, service account token secrets are no longer automatically created. Before continuing, [manually create](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#manually-create-a-service-account-api-token) the calico-node service account token: - -```bash -kubectl apply -f - <: -``` - -Then, find the secret containing the service account token for the `calico-node` service account: - -``` -kubectl get secret -n calico-system | grep calico-node -``` - -Inspect the output and find the name of the token, store it in a variable: - -``` -$ name=calico-node-token-xxxxx -``` - -Extract the parts of the secret, storing them in variables: - -``` -$ ca=$(kubectl get secret/$name -o jsonpath='{.data.ca\.crt}' -n calico-system) - -$ token=$(kubectl get secret/$name -o jsonpath='{.data.token}' -n calico-system | base64 --decode) - -$ namespace=$(kubectl get secret/$name -o jsonpath='{.data.namespace}' -n calico-system | base64 --decode) -``` - -Then, output the file: - -```bash -cat < calico-config -apiVersion: v1 -kind: Config -clusters: -- name: kubernetes - cluster: - certificate-authority-data: ${ca} - server: ${server} -contexts: -- name: calico-windows@kubernetes - context: - cluster: kubernetes - namespace: calico-system - user: calico-windows -current-context: calico-windows@kubernetes -users: -- name: calico-windows - user: - token: ${token} -EOF -``` - -Copy this config file to the windows node `{{rootDirWindows}}\calico-kube-config` and set the KUBECONFIG environment variable in `config.ps1` to point to it. diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx deleted file mode 100644 index c923ac2a0a..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Get Calico for Windows running in your Kubernetes cluster. -hide_table_of_contents: true ---- - -# Kubernetes - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx deleted file mode 100644 index ab0573bd1a..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/rancher.mdx +++ /dev/null @@ -1,128 +0,0 @@ ---- -description: Install Calico for Windows on a Rancher RKE cluster. 
---- - -# Install Calico for Windows on a Rancher Kubernetes Engine cluster - -## Big picture - -Install {{prodnameWindows}} on a Rancher Kubernetes Engine (RKE) cluster. - -## Value - -Run Linux and Windows workloads on a RKE cluster with {{prodname}}. - -## Before you begin - -**Supported** - -- RKE Kubernetes 1.20, 1.19, or 1.18 - -**Supported networking** - -- BGP with no encapsulation -- VXLAN - -**Required** - -- An RKE cluster provisioned with [no network plugin](https://rancher.com/docs/rke/latest/en/config-options/add-ons/network-plugins#disabling-deployment-of-a-network-plug-in) - but which otherwise meets the {{prodnameWindows}} Kubernetes [cluster requirements](requirements.mdx). This guide was tested with RKE v1.18.9. -- One or more Windows nodes that meet the [requirements](requirements.mdx). - -## How to - -The following steps will outline the installation of {{prodname}} networking on the RKE cluster, then the installation of {{prodnameWindows}} on the Windows nodes. - -1. Install the Tigera {{prodname}} operator and custom resource definitions. - - ``` - kubectl create -f {{manifestsUrl}}/manifests/tigera-operator.yaml - ``` - - :::note - - Due to the large size of the CRD bundle, `kubectl apply` might exceed request limits. Instead, use `kubectl create` or `kubectl replace`. - - ::: - -1. Download the necessary Installation custom resource. - - ```bash - wget {{manifestsUrl}}/manifests/custom-resources.yaml - ``` - -1. Update the `calicoNetwork` options, ensuring that the correct pod CIDR is set. (Rancher uses `10.42.0.0/16` by default.) - Below are sample installations for VXLAN and BGP networking using the default Rancher pod CIDR: - - **VXLAN** - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - # Configures Calico networking. - calicoNetwork: - bgp: Disabled - # Note: The ipPools section cannot be modified post-install. - ipPools: - - blockSize: 26 - cidr: 10.42.0.0/16 - encapsulation: VXLAN - natOutgoing: Enabled - nodeSelector: all() - ``` - - **BGP** - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - # Configures Calico networking. - calicoNetwork: - # Note: The ipPools section cannot be modified post-install. - ipPools: - - blockSize: 26 - cidr: 10.42.0.0/16 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() - ``` - - :::note - - For more information on configuration options available in this manifest, see [the installation reference](../../../../reference/installation/api.mdx). - - ::: - -1. Apply the updated custom resources: - - ```bash - kubectl create -f custom-resources.yaml - ``` - -1. Configure strict affinity: - - ```bash - kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' - ``` - -1. Finally, follow the {{prodnameWindows}} [quickstart guide for Kubernetes](../quickstart.mdx#install-calico-for-windows-manually) - For VXLAN clusters, follow the instructions under the "Kubernetes VXLAN" tab. For BGP clusters, follow the instructions under the "Kubernetes BGP" tab. - - :::note - - For Rancher default values for service CIDR and DNS cluster IP, see the [Rancher kube-api service options](https://rancher.com/docs/rke/latest/en/config-options/services/#kubernetes-api-server-options). - - ::: - -1. Check the status of the nodes with `kubectl get nodes`. 
If you see that the Windows node has the status `Ready`, then your RKE cluster with {{prodnameWindows}} is ready for Linux and Windows workloads!
-
-## Next steps
-
-- [Try the basic policy demo](../demo.mdx)
-- [Secure pods with {{prodname}} network policy](../../../../network-policy/get-started/calico-policy/calico-network-policy.mdx)
diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx
deleted file mode 100644
index f55f3a1217..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/requirements.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-description: Review the requirements for the standard install for Calico for Windows.
----
-
-# Requirements
-
-## About {{prodnameWindows}}
-
-Because the Kubernetes and {{prodname}} control components do not run on Windows yet, a hybrid Linux/Windows cluster is required. The {{prodnameWindows}} standard installation is distributed as a **.zip archive**.
-
-## What's supported in this release
-
-✓ Install: Manifest install for Kubernetes clusters
-
-✓ Platforms: Kubernetes, OpenShift, RKE, EKS, AKS
-
-✓ Networking:
-
-- Kubernetes, on-premises: Calico CNI with BGP or VXLAN
-- OpenShift: Calico CNI with BGP or VXLAN
-- Rancher Kubernetes Engine: Calico CNI with BGP or VXLAN
-- EKS: VPC CNI
-- AKS: Azure CNI
-
-## Requirements
-
-### CNI and networking options
-
-The following table summarizes the networking options and considerations.
-
-| Networking | Components | **Value/Content** |
-| ------------------ | ------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| {{prodname}} BGP | Windows CNI plugin:

    calico.exe

    Linux: {{prodname}} for policy and networking | {{prodname}}'s native networking approach, supports:
    - Auto-configured node-to-node BGP mesh over an L2 fabric
    - Peering with external routers for an L3 fabric
    - {{prodname}} IPAM and IP aggregation (with some limitations)
    - Route reflectors (including the new in-cluster route reflector introduced in {{prodname}} v3.3). **Note**: Windows node cannot act as route reflectors.
    - Kubernetes API datastore driver

    **AWS users**: If running on AWS, you must disable the source/dest check on your EC2 instances so that hosts can forward traffic on behalf of pods. | -| {{prodname}} VXLAN | Windows CNI plugin:
    calico.exe

    Linux: {{prodname}} for policy and networking | {{prodname}}'s VXLAN overlay, supports:

    - VXLAN overlay, which can traverse most networks.
    - Auto-configured node-to-node routing
    - {{prodname}} IPAM and IP aggregation (with some limitations)
    - Kubernetes API datastore driver
    **Note**: VXLAN runs on UDP port 4789 (this is the only port supported by Windows), remember to open that port between your {{prodname}} hosts in any firewalls / security groups. | -| Cloud provider | Windows CNI plugin: win-bridge.exe

    Linux: {{prodname}} policy-only | A useful fallback, particularly if you have a Kubernetes cloud provider that automatically installs inter-host routes. {{prodname}} has been tested with the standard **win-bridge.exe** CNI plugin so it should work with any networking provider that ultimately uses win-bridge.exe to network the pod (such as the Azure CNI plugin and cloud provider). | - -:::note - -If Calico CNI with VXLAN is used, BGP must be disabled. See the [installation reference](../../../../reference/installation/api.mdx#operator.tigera.io/v1.BGPOption). - -::: - -### Datastores - -Whether you use etcd or Kubernetes datastore (kdd), the datastore for the Windows node/Kubernetes cluster must be the same as the datastore for the Linux control node. (You cannot mix datastores in {{prodnameWindows}}.) - -### Kubernetes version - -See the [Kubernetes requirements](../../requirements.mdx#kubernetes-requirements). - -Earlier versions may work, but we do not actively test {{prodnameWindows}} against them, and they may have known issues and incompatibilities. - -### Linux platform requirements - -- At least one Linux Kubernetes worker node to run {{prodname}}'s cluster-wide components that meets [Linux system requirements](../../requirements.mdx), and is installed with {{prodname}} v3.12+. -- VXLAN or BGP without encapsulation is supported if using {{prodname}} CNI. IPIP ({{prodname}}'s default encapsulation mode) is not supported. Use the following command to turn off IPIP. - -```bash -calicoctl patch felixconfiguration default -p '{"spec":{"ipipEnabled":false}}' -``` - -- If using {{prodname}} IPAM, strict affinity of IPAM configuration must be set to `true`. - -```bash -kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' -``` - -:::note - -For operator-managed Linux {{prodname}} clusters, three Linux worker nodes are required to meet high-availability requirements for Typha. - -::: - -### Windows platform requirements - -- Windows versions: - - - Windows Server 1809 (build 17763.1432 or later) - - Windows Server 2022 (build 20348.169 or later) - - :::note - - Windows Server version support differs for each Kubernetes version. Review the [Windows OS Version Support](https://kubernetes.io/docs/setup/production-environment/windows/intro-windows-in-kubernetes/#windows-os-version-support) table for the Windows Server versions supported by each Kubernetes version. - - ::: - -- Be able to run commands as Administrator using PowerShell. -- Container runtime: [Docker](https://docs.microsoft.com/en-us/virtualization/windowscontainers/quick-start/set-up-environment?tabs=Windows-Server) or [containerd](https://kubernetes.io/docs/setup/production-environment/container-runtimes/#containerd) is installed and running. If containerd is running, it will be used as the container runtime otherwise Docker is assumed. -- Remote access to the Windows node via Remote Desktop Protocol (RDP) or Windows Remote Management (WinRM) -- If you are using {{prodname}} BGP networking, the RemoteAccess service must be installed for the Windows BGP Router. -- Windows nodes support only a single IP pool type (so, if using a VXLAN pool, you should only use VXLAN throughout the cluster). -- TLS v1.2 enabled. For example: - -```powershell -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -``` - -### EKS requirements - -- The VPC controllers must be installed to run Windows pods. 
-- An instance role on the Windows instance must have permissions to get `namespaces` and get `secrets` in the calico-system namespace (or kube-system namespace if you are using a non operator-managed {{prodname}} installation.) - -### AKS requirements - -- {{prodnameWindows}} can be enabled only on newly created clusters. -- Available with Kubernetes version 1.20 or later - -## Next steps - -[Install Calico for Windows](standard.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx deleted file mode 100644 index 9ea217ed1a..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/kubernetes/standard.mdx +++ /dev/null @@ -1,310 +0,0 @@ ---- -description: Install Calico for Windows to enable a workload-to-workload Zero Trust model that protects modern business and legacy applications. ---- - -# Install Calico for Windows - -## Big picture - -Install {{prodnameWindows}} on Kubernetes clusters. The standard installation for {{prodnameWindows}} requires more time and expertise to configure. If you need to get started quickly, we recommend the [Quickstart](../quickstart.mdx). - -## Value - -Extend your Kubernetes deployment to Windows environments. - -## Before you begin - -**Required** - -- Install and configure [calicoctl](../../../../operations/calicoctl/index.mdx) -- Linux and Windows nodes [meet requirements](requirements.mdx) -- If using {{prodname}} networking, copy the kubeconfig file (used by kubelet) to each Windows node to the file, `c:\k\config`. -- Download {{prodnameWindows}} and Kubernetes binaries to each Windows nodes to prepare for install: - - On each of your Windows nodes, download and run {{prodnameWindows}} installation scripts: - - ``` - Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 - c:\install-calico-windows.ps1 -DownloadOnly yes -KubeVersion - ``` - - cd into `{{rootDirWindows}}`, you will see the calico-node.exe binary, install scripts, and other files. - -## How to - -Because the Kubernetes and {{prodname}} control components do not run on Windows yet, a hybrid Linux/Windows cluster is required. First you create a Linux cluster for {{prodname}} components, then you join Windows nodes to the Linux cluster. - -The geeky details of what you get by default: - - - -**Kubernetes** - -1. [Create a Linux cluster](#create-a-linux-cluster) -1. [Ensure pods run on the correct nodes](#ensure-pods-run-on-the-correct-nodes) -1. [Prepare Windows nodes to join the Linux cluster](#prepare-windows-nodes-to-join-the-linux-cluster) - -**{{prodname}}** - -1. [Install Calico on Linux control and worker nodes](#install-calico-on-linux-control-and-worker-nodes) -1. [Install Calico and Kubernetes on Windows nodes](#install-calico-and-kubernetes-on-windows-nodes) - -### Create a Linux cluster - -There are many ways to create a Linux Kubernetes cluster. We regularly test {{prodnameWindows}} with `kubeadm`. - -### Ensure pods run on the correct nodes - -A primary issue of running a hybrid Kubernetes cluster is that many Kubernetes manifests do not specify a **node selector** to restrict where their pods can run. For example, `kubeadm` installs `kube-proxy` (Kubernetes per-host NAT daemon) using a DaemonSet that does not include a node selector. 
This means that the kube-proxy pod, which only supports Linux, will be scheduled to both Linux and Windows nodes. Services/pods that should run only on Linux nodes (such as the `kube-proxy` DaemonSet) should be started with a node selector to avoid attempting to schedule them to Windows nodes. - -To get around this for `kube-proxy`: - -1. Use `kubectl` to retrieve the DaemonSet. - - ``` - kubectl get ds kube-proxy -n kube-system -o yaml > kube-proxy.yaml - ``` - -1. Modify the `kube-proxy.yaml` file to include a node selector that selects only Linux nodes: - - ```yaml noValidation - spec: - template: - ... - spec: - nodeSelector: - kubernetes.io/os: linux - containers: - ``` - -1. Apply the updated manifest. - - ``` - kubectl apply -f kube-proxy.yaml - ``` - -A similar change may be needed for other Kubernetes services (such as `kube-dns` or `core-dns`). - -### Prepare Windows nodes to join the Linux cluster - -On each Windows node, follow the steps below to configure `kubelet` and `kube-proxy` service. - -**Step 1: Configure kubelet** - -`kubelet` must be configured to use CNI networking by setting the following command line arguments, depending on the installed container runtime. - -For Docker: - -- `--network-plugin=cni` -- `--cni-bin-dir=` -- `--cni-conf-dir=` - -For containerd: - -- `--container-runtime=remote` -- `--container-runtime-endpoint=npipe:////.//pipe//containerd-containerd` - -The CNI bin and conf dir settings are required by the {{prodname}} installer to install the CNI binaries and configuration file. - -:::note - -Among other parameters, the containerd configuration file includes options to configure the CNI bin and conf dirs. - -::: - -The following kubelet settings are also important: - -- `--hostname-override` can be set to $(hostname) to match {{prodname}}'s default. `kubelet` and {{prodname}} must agree on the host/nodename; if your network environment results in hostnames that vary over time you should set the hostname override to a static value per host and update {{prodname}}'s nodename accordingly. -- `--node-ip` should be used to explicitly set the IP that kubelet reports to the API server for the node. We recommend setting this to the host's main network adapter's IP since we've seen kubelet incorrectly use an IP assigned to a HNS bridge device rather than the host's network adapter. -- Because of a Windows networking limitation, if using {{prodname}} IPAM, --max-pods should be set to, at most, the IPAM block size of the IP pool in use minus 4: - - | **IP pool block size** | **Max pods** | - | ---------------------- | -------------- | - | /n | 2^/32-n^ - 4 | - | /24 | 252 | - | /25 | 124 | - | /26 (default) | 60 | - | /27 | 28 | - | /28 | 12 | - | /29 | 4 | - | /30 or above | Cannot be used | - -In addition, it's important that `kubelet` is started after the vSwitch has been created, which happens when {{prodname}} initializes the dataplane. Otherwise, `kubelet` can be disconnected for the API server when the vSwitch is created. - -**AWS users**: If using the AWS cloud provider, you should add the following argument to the `kubelet`: - -`--hostname-override=` (and set the {{prodname}} nodename variable to match). In addition, you should add `KubernetesCluster=` as a tag when creating your Windows instance. 
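-
-As a worked example of the `--max-pods` rule above: with the default /26 block size, the limit is 2^(32-26) - 4 = 60. A small sketch for computing the value for other block sizes:
-
-```bash
-# Max pods for a Calico IPAM block of size /n, per the table above.
-n=26
-echo $(( (1 << (32 - n)) - 4 ))   # prints 60 for the default /26
-```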
- -**As a quickstart**, the {{prodname}} package includes a sample script at `{{rootDirWindows}}\kubernetes\kubelet-service.ps1` that: - -- Waits for {{prodname}} to initialise the vSwitch -- Starts `kubelet` with - - If containerd service is running, the following flags are set: - - --container-runtime set to `remote` - - --container-runtime-endpoint set to `npipe:////.//pipe//containerd-containerd` - - Otherwise, the following flags are set for Docker: - - --network-plugin set to `cni` - - --cni-bin-dir set to `c:\k\cni` - - --cni-conf-dir set to `c:\k\cni\config` - - --pod-infra-container-image set to `kubeletwin/pause` - - --kubeconfig set to the path of node kubeconfig file - - --hostname-override set to match {{prodname}}'s nodename - - --node-ip set to the IP of the default vEthernet device - - --cluster-dns set to the IPs of the dns name servers - -See the README in the same directory for more details. Feel free to modify the script to adjust other `kubelet` parameters. - -:::note - -The script will pause at the first stage until {{prodname}} is installed by following the instructions in the next section. - -::: - -**Step 2: Configure kube-proxy** - -`kube-proxy` must be configured as follows: - -- With the correct HNS network name used by the active CNI plugin. kube-proxy reads the HNS network name from an environment variable KUBE_NETWORK - - With default configuration, {{prodname}} uses network name "{{prodname}}" -- For VXLAN, with the source VIP for the pod subnet allocated to the node. This is the IP that kube-proxy uses when it does SNAT for a NodePort. For {{prodname}}, the source VIP should be the second IP address in the subnet chosen for the host. For example, if {{prodname}} chooses an IP block 10.0.0.0/26 then the source VIP should be 10.0.0.2. The script below will automatically wait for the block to be chosen and configure kube-proxy accordingly. -- For {{prodname}} policy to function correctly with Kubernetes services, the WinDSR feature gate must be enabled. This requires Windows Server build 17763.1432 or greater and Kubernetes v1.14 or greater. {{prodname}} will automatically enable the WinDSR feature gate if kubernetes services are managed by {{prodnameWindows}}. - -kube-proxy should be started via a script that waits for the Calico HNS network to be provisioned. The {{prodname}} package contains a suitable script for use with {{prodname}} networking at `{{rootDirWindows}}\kubernetes\kube-proxy-service.ps1`. The script: - -- Waits for {{prodname}} to initialise the vSwitch. -- Calculates the correct source VIP for the local subnet. -- Starts kube-proxy with the correct feature gates and hostname to work with {{prodname}}. - -See the README in the same directory for more details. Feel free to modify the script to -adjust other kube-proxy parameters. - -The script will pause at the first stage until {{prodname}} is installed by following the instructions in the next section. - -### Install Calico on Linux control and worker nodes - -**If using {{prodname}} BGP networking** - -1. Disable the default {{prodname}} IP-in-IP networking (which is not compatible with Windows), by modifying the {{prodname}} manifest, and setting the `CALICO_IPV4POOL_IPIP` environment variable to "Never" before applying the manifest. - - If you do apply the manifest with the incorrect value, changing the manifest and re-applying will have no effect. 
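-
-If you end up configuring kube-proxy by hand instead of using the packaged script, the source VIP can be derived from the IPAM block once {{prodname}} has chosen it for the node. A minimal sketch for dotted-quad blocks, using the example block from above (the packaged script performs this calculation for you):
-
-```bash
-# Second IP in the node's IPAM block, e.g. 10.0.0.0/26 -> 10.0.0.2.
-block="10.0.0.0/26"
-base="${block%/*}"                     # strip the prefix length
-echo "${base%.*}.$(( ${base##*.} + 2 ))"
-```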
To adjust the already-created IP pool: - - ```bash - calicoctl get ippool -o yaml > ippool.yaml - ``` - - Then, modify ippool.yaml by setting the `ipipMode` to `Never` and then apply the updated manifest: - - ```bash - calicoctl apply -f ippool.yaml - ``` - -**If using {{prodname}} VXLAN networking** - -1. Modify VXLAN as described in [Customize the manifests](../../self-managed-onprem/config-options.mdx) guide. Note the following: - - - Windows can support only a single type of IP pool so it is important that you use only a single VXLAN IP pool in this mode. - - Windows supports only VXLAN on port 4789 and VSID ≥ 4096. {{prodname}}'s default (on Linux and Windows) is to use port 4789 and VSID 4096. - -1. Apply the manifest using `calicoctl`, and verify that you have a single pool with `VXLANMODE Always`. - - ```bash - calicoctl get ippool -o wide - ``` - -1. For Linux control nodes using {{prodname}} networking, strict affinity must be set to `true`. - This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes: - ```bash - kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' - ``` - -### Install Calico and Kubernetes on Windows nodes - -Follow the steps below on each Windows node to install Kubernetes and {{prodname}}: - -**If using {{prodname}} BGP** - -Install the RemoteAccess service using the following PowerShell commands: - -```powershell -Install-WindowsFeature RemoteAccess -Install-WindowsFeature RSAT-RemoteAccess-PowerShell -Install-WindowsFeature Routing -``` - -Then restart the computer: - -```powershell -Restart-Computer -Force -``` - -before running: - -```powershell -Install-RemoteAccess -VpnType RoutingOnly -``` - -Sometimes the remote access service fails to start automatically after install. To make sure it is running, execute the following command: - -```powershell -Start-Service RemoteAccess -``` - -1. If using a non-{{prodname}} network plugin for networking, install and verify it now. -2. Edit the install configuration file, `config.ps1` as follows: - - | **Set this variable...** | To... | - | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | - | $env:KUBE_NETWORK | CNI plugin you plan to use. For {{prodname}}, set the variable to `{{prodname}}.*` | - | $env:CALICO_NETWORKING_BACKEND | `windows-bgp` `vxlan` or `none` (if using a non-{{prodname}} CNI plugin). | - | $env:CNI\_ variables | Location of your Kubernetes installation. | - | $env:K8S_SERVICE_CIDR | Your Kubernetes service cluster IP CIDR. | - | $env:CALICO_DATASTORE_TYPE | {{prodname}} datastore you want to use. | - | $env:KUBECONFIG | Location of the kubeconfig file {{prodname}} should use to access the Kubernetes API server. To set up a secure kubeconfig with the correct permissions for {{prodnameWindows}}, see [Create a kubeconfig](../kubeconfig.mdx) for {{prodnameWindows}}. | - | $env:ETCD\_ parameters | etcd3 datastore parameters. **Note**: Because of a limitation of the Windows dataplane, a Kubernetes service ClusterIP cannot be used for the etcd endpoint (the host compartment cannot reach Kubernetes services). | - | $env:NODENAME | Hostname used by kubelet. The default uses the node's hostname. 
**Note**: If you are using the sample kubelet start-up script from the {{prodname}} package, kubelet is started with a hostname override that forces it to use this value. | - | | For AWS to work properly, kubelet should use the node's internal domain name for the AWS integration. | - -3. Run the installer. - - - Change directory to the location that you unpacked the archive. For example: - -```powershell -cd {{rootDirWindows}} -``` - -- Run the install script: - -``` -.\install-calico.ps1 -``` - -:::note - -The installer initializes the Windows vSwitch, which can cause a short connectivity outage as the networking stack is reconfigured. After running that command, you may need to: - -- Reconnect to your remote desktop session. -- Restart `kubelet` and `kube-proxy` if they were already running. -- If you haven't started `kubelet` and `kube-proxy` already, you should do so now. The quickstart scripts provided in the {{prodname}} package provide an easy way to do this. {{prodname}} requires `kubelet` to be running to complete its per-node configuration (since Kubelet creates the Kubernetes Node resource). - -::: - -:::note - - After you run the installer, do not move the directory because the service registration refers to the path of the directory. - -::: - -4. Verify that the {{prodname}} services are running. - - ```powershell - Get-Service -Name CalicoNode - Get-Service -Name CalicoFelix - ``` - -## Next steps - -- [Create a kubeconfig](../kubeconfig.mdx) -- [Review network policy limitations in Windows](../limitations.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx deleted file mode 100644 index ce10cabfb1..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/limitations.mdx +++ /dev/null @@ -1,174 +0,0 @@ ---- -description: Review limitations before starting installation. ---- - -# Limitations and known issues - -## Calico for Windows feature limitations - -| Feature | | -| ------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Distributions | **Supported:**
    - EKS
    - AKS
    - AWS
    - GCE
    - Azure
    - Kubernetes on-premises
    - Kubernetes on DigitalOcean
    - OpenShift
    - Rancher RKE

    **Not supported**:
    - GKE
    - IKS
    - OpenStack
    - K3s clusters | -| Install and upgrade | **Supported**: Manifest with manual upgrade

    **Not supported**:
    - Operator install
    - Non-cluster hosts
    - Typha component for scaling (Linux-based feature) | -| Networking | **Supported**:
    - Calico VXLAN, no cross-subnet or VXLAN MTU settings with [limitations](#{{prodname}}-vxlan-networking-limitations)
    - Calico non-overlay mode with BGP peering with [limitations](#{{prodname}}-bgp-networking-limitations)
    - IPv4

    **Not supported**:
    - Overlay mode with BGP peering
    - IP in IP overlay with BGP routing
    - Cross-subnet support and MTU setting for VXLAN
    - IPv6 and dual stack
    - Service advertisement | -| Security | **Not supported**:
    - Application Layer Policy (ALP) for Istio
    - Policy for hosts (host endpoints, including automatic host endpoints)
    - Encryption with WireGuard | -| Operations | **Not supported**:
    - Calico node status | -| Metrics | **Not supported**: Prometheus monitoring | -| eBPF | **Not supported**: (Linux-based feature) | - -## {{prodname}} BGP networking limitations - -If you are using {{prodname}} with BGP, note these current limitations with Windows. - -| Feature | Limitation | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| IP mobility/ borrowing | {{prodname}} IPAM allocates IPs to host in blocks for aggregation purposes.
    If the IP pool is full, nodes can also "borrow" IPs from another node's block. In BGP terms, the borrower then advertises a more specific "/32" route for the borrowed IP, and traffic for that IP alone is routed to the borrowing host.

    Windows nodes do not support this borrowing mechanism; they will not borrow IPs even if the IP pool is full and they mark their blocks so that Linux nodes will not borrow from them. | -| IPs reserved for Windows | {{prodname}} IPAM allocates IPs in CIDR blocks. Due to networking requirements on Windows, four IPs per Windows node-owned block must be reserved for internal purposes.

    For example, with the default block size of /26, each block contains 64 IP addresses; 4 are reserved for Windows, leaving 60 for pod networking.

    To reduce the impact of these reservations, a larger block size can be configured at the IP pool scope (before any pods are created). | -| Single IP block per host | {{prodname}} IPAM is designed to allocate blocks of IPs (default size /26) to hosts on demand. While the {{prodname}} CNI plugin was written to do the same, kube-proxy currently only supports a single IP block per host.

    To work around the default limit of one /26 per host, there are some options:

    - With {{prodname}} BGP networking and the etcd datastore, before creating any blocks, change the block size used by the IP pool so that it is sufficient for the largest number of pods that will run on a single Windows host.
    - Use {{prodname}} BGP networking with the kubernetes datastore. In that mode, {{prodname}} IPAM is not used and the CNI host-local IPAM plugin is used with the node's Pod CIDR.

    To allow multiple IPAM blocks per host (at the expense of kube-proxy compatibility), set the `windows_use_single_network` flag to `false` in the `cni.conf.template` before installing {{prodname}}. Changing that setting after pods are networked is not recommended because it may leak HNS endpoints. | -| IP-in-IP overlay | {{prodname}}'s IPIP overlay mode cannot be used in clusters that contain Windows nodes because Windows does not support IP-in-IP. | -| NATOutgoing | {{prodname}} IP pools support a "NAT outgoing" setting with the following behaviour:

    - Traffic between {{prodname}} workloads (in any IP pools) is not NATted.
    - Traffic leaving the configured IP pools is NATted if the workload has an IP within an IP pool that has NAT outgoing enabled. {{prodnameWindows}} honors the above setting, but it is only applied at pod creation time. If the IP pool configuration is updated after a pod is created, the pod's traffic will continue to be NATted (or not) as before. NAT policy for newly-networked pods will honor the new configuration. {{prodnameWindows}} automatically adds the host itself and its subnet to the NAT exclusion list. This behaviour can be disabled by setting the flag `windows_disable_host_subnet_nat_exclusion` to `true` in `cni.conf.template` before running the install script. | -| Service IP advertisement | This {{prodname}} feature is not supported on Windows. | - -### Check your network configuration - -If you are using a networking type that requires layer 2 reachability (such as {{prodname}} with a BGP mesh and no peering to your fabric), you can check that your network has layer 2 reachability as follows: - -On each of your nodes, check the IP network of the network adapter that you plan to use for pod networking. For example, on Linux, assuming your network adapter is eth0, you can run: - -``` - $ ip addr show eth0 - 2: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 00:0c:29:cb:c8:19 brd ff:ff:ff:ff:ff:ff - inet 192.168.171.136/24 brd 192.168.171.255 scope global eth0 - valid_lft forever preferred_lft forever - inet6 fe80::20c:29ff:fecb:c819/64 scope link - valid_lft forever preferred_lft forever - ``` - -In this case, the IPv4 address is 192.168.171.136/24, which, after applying the /24 mask, gives 192.168.171.0/24 for the IP network. - -Similarly, on Windows, you can run: - -``` - PS C:\> ipconfig - - Windows IP Configuration - - Ethernet adapter vEthernet (Ethernet 2): - - Connection-specific DNS Suffix . : us-west-2.compute.internal - Link-local IPv6 Address . . . . . : fe80::6d10:ccdd:bfbe:bce2%15 - IPv4 Address. . . . . . . . . . . : 172.20.41.103 - Subnet Mask . . . . . . . . . . . : 255.255.224.0 - Default Gateway . . . . . . . . . : 172.20.32.1 - ``` - -In this case, the IPv4 address is 172.20.41.103 and the mask is represented as the bytes 255.255.224.0 rather than in CIDR notation. Applying the mask, we get the network address 172.20.32.0/19. - -Because the Linux node has network 192.168.171.0/24 and the Windows node has a different network, 172.20.32.0/19, they are unlikely to be on the same layer 2 network. - -## {{prodname}} VXLAN networking limitations - -Because of differences between the Linux and Windows dataplane feature sets, the following {{prodname}} features are not supported on Windows. - -| Feature | Limitation | -| ------------------------ | ---------------------------------------------------------------------------------------------------- | -| IPs reserved for Windows | {{prodname}} IPAM allocates IPs in CIDR blocks. Due to networking requirements on Windows, four IPs per Windows node-owned block must be reserved for internal purposes.

    For example, with the default block size of /26, each block contains 64 IP addresses; 4 are reserved for Windows, leaving 60 for pod networking.

    To reduce the impact of these reservations, a larger block size can be configured at the IP pool scope (before any pods are created). | -| Single IP block per host | {{prodname}} IPAM is designed to allocate blocks of IPs (default size /26) to hosts on demand. While the {{prodname}} CNI plugin was written to do the same, kube-proxy currently only supports a single IP block per host.
    To allow multiple IPAM blocks per host (at the expense of kube-proxy compatibility), set the `windows_use_single_network` flag to `false` in the `cni.conf.template` before installing {{prodname}}. Changing that setting after pods are networked is not recommended because it may leak HNS endpoints. | - -## Routes are lost in cloud providers - -If you create a Windows host with a cloud provider (AWS for example), the creation of the vSwitch at {{prodname}} install time can remove the cloud provider's metadata route. If your application relies on the metadata service, you may need to examine the routing table before and after installing {{prodname}} to reinstate any lost routes. - -## VXLAN limitations - -**VXLAN support** - -- Windows 1903 build 18317 and above -- Windows 1809 build 17763 and above - -**Configuration updates** - -Certain configuration changes will not be honored after the first pod is networked. This is because Windows does not currently support updating the VXLAN subnet parameters after the network is created so updating those parameters requires the node to be drained: - -One example is the VXLAN VNI setting. To change such parameters: - -- Drain the node of all pods -- Delete the {{prodname}} HNS network: - - ```powershell - Import-Module -DisableNameChecking {{rootDirWindows}}\libs\hns\hns.psm1 - Get-HNSNetwork | ? Name -EQ "{{prodname}}" | Remove-HNSNetwork - ``` - -- Update the configuration in `config.ps1`, run `uninstall-calico.ps1` and then `install-calico.ps1` to regenerate the CNI configuration. - -## Pod-to-pod connections are dropped with TCP reset packets - -Restarting Felix or changes to policy (including changes to endpoints referred to in policy), can cause pod-to-pod connections to be dropped with TCP reset packets. When one of the following occurs: - -- The policy that applies to a pod is updated -- Some ingress or egress policy that applies to a pod contains selectors and the set of endpoints that those selectors match changes - -Felix must reprogram the HNS ACL policy attached to the pod. This reprogramming can cause TCP resets. Microsoft has confirmed this is a HNS issue, and they are investigating. - -## Service ClusterIPs incompatible with selectors/pod IPs in network policy - -**Windows 1809 prior to build 17763.1432** - -On Windows nodes, kube-proxy unconditionally applies source NAT to traffic from local pods to service ClusterIPs. This means that, at the destination pod, where policy is applied, the traffic appears to come from the source host rather than the source pod. In turn, this means that a network policy with a source selector matching the source pod will not match the expected traffic. - -## Network policy and using selectors - -Under certain conditions, relatively simple {{prodname}} policies can require significant Windows dataplane resources, that can cause significant CPU and memory usage, and large policy programming latency. - -We recommend avoiding policies that contain rules with both a source and destination selector. The following is an example of a policy that would be inefficient. 
The policy applies to all workloads, and it only allows traffic from workloads labeled as clients to workloads labeled as servers: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: calico-dest-selector -spec: - selector: all() - order: 500 - ingress: - - action: Allow - destination: - selector: role == "webserver" - source: - selector: role == "client" -``` - -Because the policy applies to all workloads, it will be rendered once per workload (even if the workload is not labeled as a server), and then the selectors will be expanded into many individual dataplane rules to capture the allowed connectivity. - -Here is a much more efficient policy that still allows the same traffic: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: calico-dest-selector -spec: - selector: role == "webserver" - order: 500 - ingress: - - action: Allow - source: - selector: role == "client" -``` - -The destination selector is moved into the policy selector, so this policy is only rendered for workloads that have the `role: webserver` label. In addition, the rule is simplified so that it only matches on the source of the traffic. Depending on the number of webserver pods, this change can reduce the dataplane resource usage by several orders of magnitude. - -## Next steps - -- [Quickstart](quickstart.mdx) -- [Standard install](kubernetes/standard.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx deleted file mode 100644 index b607d8441f..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/maintain.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -description: Tasks to manage Calico services and uninstall Calico for Windows. ---- - -# Start and stop Calico for Windows services - -## Big picture - -Start, stop, and update {{prodnameWindows}} services on the Linux control plane node, and uninstall {{prodnameWindows}}. - -## How to - -### Start and stop {{prodnameWindows}} services - -- Install and boot {{prodnameWindows}}: `install-calico.ps1` -- Start {{prodnameWindows}} services: `start-calico.ps1` -- Stop {{prodnameWindows}} services: `stop-calico.ps1` - -### Update {{prodname}} services - -To change the parameters defined in `config.ps1`: - -- Run `uninstall-calico.ps1` to remove {{prodnameWindows}} service configuration -- Modify the configuration -- Run `install-calico.ps1` to reinstall {{prodnameWindows}}. - -Because `config.ps1` is imported by the various component startup scripts, additional environment variables can be added, as documented in the [{{prodname}} reference guide](../../../reference/index.mdx). - -### Update service wrapper configuration - -The `nssm` command supports changing a number of configuration options for the {{prodname}} services. For example, to adjust the maximum size of the Felix log file before it is rotated: - -```powershell -nssm set CalicoFelix AppRotateBytes 1048576 -``` - -### Uninstall {{prodnameWindows}} from Windows nodes - -The following steps remove {{prodnameWindows}} (for example, to change its configuration) but keep the cluster running. - -1. Remove all pods from the Windows nodes. -1. 
On each Windows node, run the uninstall script: - - ```powershell - {{rootDirWindows}}\uninstall-calico.ps1 - ``` - - :::note - - If you are uninstalling to change configuration, make sure that you run the uninstall script with the old configuration file. - - ::: - -### Uninstall kubelet and kube-proxy services from Windows nodes - -The following steps uninstall kubelet/kube-proxy services if they were installed by running `{{rootDirWindows}}\kubernetes\install-kube-services.ps1`. - -1. Remove all pods from the Windows nodes. -1. On each Windows node, run the uninstall script: - - ``` - {{rootDirWindows}}\kubernetes\uninstall-kube-services.ps1 - ``` - -1. If desired, delete the `{{rootDirWindows}}` directory. diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx deleted file mode 100644 index 216188667d..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/openshift-installation.mdx +++ /dev/null @@ -1,339 +0,0 @@ ---- -description: Install Calico on an OpenShift 4 cluster on Windows nodes ---- - -# Install an OpenShift 4 cluster on Windows nodes - -:::note - -Currently, {{prodnameWindows}} supports Openshift versions only up to v4.5 because it requires the Windows Machine Config Bootstrapper binary (wmcb.exe) for adding Windows nodes to clusters. OpenShift v4.6+ does not support the Windows Machine Config Bootstrapper binary and uses the Red Hat Windows Machine Config Operator (WMCO), which does not correctly recognize {{prodname}} networking in the cluster. - -::: - -## Big picture - -Install an OpenShift 4 cluster on AWS with {{prodname}} on Windows nodes. - -## Value - -Run Windows workloads on OpenShift 4 with {{prodname}}. - -## How to - -### Before you begin - -- Ensure that your environment meets the {{prodname}} [system requirements](../openshift/requirements.mdx). - -- Ensure that you have [configured an AWS account](https://docs.openshift.com/container-platform/4.4/installing/installing_aws/installing-aws-account.html) appropriate for OpenShift 4, - and have [set up your AWS credentials](https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html). - Note that the OpenShift installer supports a subset of [AWS regions](https://docs.openshift.com/container-platform/4.4/installing/installing_aws/installing-aws-account.html#installation-aws-regions_installing-aws-account). - -- Ensure that you have a [RedHat account](https://cloud.redhat.com/). A RedHat account is required to obtain the pull secret necessary to provision an OpenShift cluster. - -- Ensure that you have installed the OpenShift installer **v4.4 or later** and OpenShift command line interface from [cloud.redhat.com](https://cloud.redhat.com/openshift/install/aws/installer-provisioned). - -- Ensure that you have [generated a local SSH private key](https://docs.openshift.com/container-platform/4.4/installing/installing_aws/installing-aws-default.html#ssh-agent-using_installing-aws-default) and have added it to your ssh-agent - -**Limitations** - -Due to an [upstream issue](https://bugzilla.redhat.com/show_bug.cgi?id=1768858), Windows pods can only be run in specific namespaces if you disable SCC. -To do this, label the namespace with `openshift.io/run-level: "1"`. - -### Create a configuration file for the OpenShift installer - -First, create a staging directory for the installation. 
This directory will contain the configuration file, along with cluster state files, that OpenShift installer will create: - -```bash -mkdir openshift-tigera-install && cd openshift-tigera-install -``` - -Now run OpenShift installer to create a default configuration file: - -```bash -openshift-install create install-config -``` - -:::note - -Refer to the [OpenShift installer documentation](https://cloud.redhat.com/openshift/install) for more information -about the installer and any configuration changes required for your platform. - -::: - -Once the installer has finished, your staging directory will contain the configuration file `install-config.yaml`. - -### Update the configuration file to use {{prodname}} - -Override the OpenShift networking to use Calico and update the AWS instance types to meet the [system requirements](../openshift/requirements.mdx): - -```bash -sed -i 's/\(OpenShiftSDN\|OVNKubernetes\)/Calico/' install-config.yaml -``` - -### Generate the install manifests - -Now generate the Kubernetes manifests using your configuration file: - -```bash -openshift-install create manifests -``` - - - -### Configure VXLAN - -Edit the Installation custom resource manifest `manifests/01-cr-installation.yaml` so that it configures an OpenShift {{prodname}} cluster with VXLAN enabled and BGP disabled: - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - variant: Calico - calicoNetwork: - bgp: Disabled - ipPools: - - blockSize: 26 - cidr: 10.128.0.0/14 - encapsulation: VXLAN - natOutgoing: Enabled - nodeSelector: all() -``` - -### Create the cluster - -Start the cluster creation with the following command and wait for it to complete. - -```bash -openshift-install create cluster -``` - -Once the above command is complete, you can verify {{prodname}} is installed by verifying the components are available with the following command. - -```bash -oc get tigerastatus -``` - -:::note - -To get more information, add `-o yaml` to the above command. - -::: - -Next, [install calicoctl](../../../operations/calicoctl/install.mdx) and ensure strict affinity is true: - -```bash -kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' -``` - -### Add Windows nodes to the cluster - -Download the latest [Windows Node Installer (WNI)](https://github.com/openshift/windows-machine-config-bootstrapper/releases) binary `wni` that matches your OpenShift minor version. - -:::note - -For OpenShift 4.6, use the latest wni for OpenShift 4.5. A wni binary for OpenShift 4.6 is not published yet. - -::: - -Next, determine the AMI id corresponding to Windows Server 1903 (build 18317) or greater. `wni` defaults to using Windows Server 2019 (build 10.0.17763) which does not include WinDSR support. -One way to do this is by searching for AMI's matching the string `Windows_Server-1903-English-Core-ContainersLatest` in the Amazon EC2 console - -Next, run `wni` to add a Windows node to your cluster. 
Replace AMI_ID, AWS_CREDENTIALS_PATH, AWS_KEY_NAME and AWS_PRIVATE_KEY_PATH with your values: - -```bash -chmod u+x wni -./wni aws create \ - --image-id AMI_ID \ - --kubeconfig openshift-tigera-install/auth/kubeconfig \ - --credentials AWS_CREDENTIALS_PATH \ - --credential-account default \ - --instance-type m5a.large \ - --ssh-key AWS_KEY_NAME \ - --private-key AWS_PRIVATE_KEY_PATH -``` - -An example of running the above steps: - -``` -$ chmod u+x wni -$ ./wni aws create \ -> --kubeconfig openshift-tigera-install/auth/kubeconfig \ -> --credentials ~/.aws/credentials \ -> --credential-account default \ -> --instance-type m5a.large \ -> --ssh-key test-key \ -> --private-key /home/user/.ssh/test-key.pem -2020/10/05 12:52:51 kubeconfig source: /home/user/openshift-tigera-install/auth/kubeconfig -2020/10/05 12:52:59 Added rule with port 5986 to the security groups of your local IP -2020/10/05 12:52:59 Added rule with port 22 to the security groups of your local IP -2020/10/05 12:52:59 Added rule with port 3389 to the security groups of your local IP -2020/10/05 12:52:59 Using existing Security Group: sg-06d1de22807d5dc48 -2020/10/05 12:57:30 External IP: 52.35.12.231 -2020/10/05 12:57:30 Internal IP: 10.0.90.193 -``` - -### Get the administrator password - -The `wni` binary writes the instance details to the file `windows-node-installer.json`. An example of the file: - -``` -{"InstanceIDs":["i-02e13d4cc76c13c83"],"SecurityGroupIDs":["sg-0a777565d64e1d2ef"]} -``` - -Use the instance ID from the file and the path of the private key used to create the instance to get the Administrator user's password: - -```bash -aws ec2 get-password-data --instance-id --priv-launch-key -``` - -### Install {{prodnameWindows}} - -1. Remote into the Windows node, open a Powershell window, and prepare the directory for Kubernetes files. - - ```powershell - mkdir c:\k - ``` - -1. Copy the Kubernetes kubeconfig file (default location: openshift-tigera-install/auth/kubeconfig), to the file **c:\k\config**. - -1. Download the powershell script, **install-calico-windows.ps1**. - - ```powershell - Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 - ``` - -1. Run the installation script, replacing the Kubernetes version with the version corresponding to your version of OpenShift. - - ```powershell - c:\install-calico-windows.ps1 -KubeVersion -ServiceCidr 172.30.0.0/16 -DNSServerIPs 172.30.0.10 - ``` - - :::note - - Get the Kubernetes version with `oc version` and use only the major, minor, and patch version numbers. For example from a cluster that returns: - - ``` - $ oc version - Client Version: 4.5.3 - Server Version: 4.5.14 - Kubernetes Version: v1.18.3+5302882 - ``` - - You will use `1.18.3`: - - ::: - -1. Install and start kube-proxy service. Execute following powershell script/commands. - - ```powershell - C:\CalicoWindows\kubernetes\install-kube-services.ps1 -service kube-proxy - Start-Service -Name kube-proxy - ``` - -1. Verify kube-proxy service is running. - - ```powershell - Get-Service -Name kube-proxy - ``` - -### Configure kubelet - -From the Windows node, download the Windows Machine Config Bootstrapper `wmcb.exe` that matches your OpenShift minor version from [Windows Machine Config Bootstrapper releases](https://github.com/openshift/windows-machine-config-bootstrapper/releases). 
For example, for OpenShift 4.5.x: - -```powershell -curl https://github.com/openshift/windows-machine-config-bootstrapper/releases/download/v4.5.2-alpha/wmcb.exe -o c:\wmcb.exe -``` - -:::note - -For OpenShift 4.6, use the latest wmcb.exe for OpenShift 4.5. A wmcb.exe binary for OpenShift 4.6 is not published yet. - -::: - -Next, we will download the `worker.ign` file from the API server: - -```powershell -$apiServer = c:\k\kubectl --kubeconfig c:\k\config get po -n openshift-kube-apiserver -l apiserver=true --no-headers -o custom-columns=":metadata.name" | select -first 1 -c:\k\kubectl --kubeconfig c:\k\config -n openshift-kube-apiserver exec $apiserver -- curl -ks https://localhost:22623/config/worker > c:\worker.ign -((Get-Content c:\worker.ign) -join "`n") + "`n" | Set-Content -NoNewline c:\worker.ign -``` - -Next, we run wmcb to configure the kubelet: - -```powershell -c:\wmcb.exe initialize-kubelet --ignition-file worker.ign --kubelet-path c:\k\kubelet.exe -``` - -:::note - -The kubelet configuration installed by Windows Machine Config -Bootstrapper includes `--register-with-taints="os=Windows:NoSchedule"` which -will require Windows pods to tolerate that taint. - -::: - -Next, we make a copy of the kubeconfig because `wmcb.exe` expects the kubeconfig to be the file `c:\k\kubeconfig`. -Then we configure kubelet to use Calico CNI: - -```powershell -cp c:\k\config c:\k\kubeconfig -c:\wmcb.exe configure-cni --cni-dir c:\k\cni --cni-config c:\k\cni\config\10-calico.conf -``` - -Finally, clean up the additional files created on the Windows node: - -```powershell -rm c:\k\kubeconfig,c:\wmcb.exe,c:\worker.ign -``` - -Exit the remote session to the Windows node and return to a shell to a Linux -node. - -We need to approve the CSR's generated by the kubelet's bootstrapping process. First, view the pending CSR's: - -```bash -oc get csr -``` - -For example: - -``` -$ oc get csr -NAME AGE SIGNERNAME REQUESTOR CONDITION -csr-55brx 4m32s kubernetes.io/kube-apiserver-client-kubelet system:admin Approved,Issued -csr-bmnfd 4m30s kubernetes.io/kubelet-serving system:node:ip-10-0-45-102.us-west-2.compute.internal Pending -csr-hwl89 5m1s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending -``` - -To approve the pending CSR's: - -```bash -oc get csr -o name | xargs oc adm certificate approve -``` - -For example: - -``` -$ oc get csr -o name | xargs oc adm certificate approve -certificatesigningrequest.certificates.k8s.io/csr-55brx approved -certificatesigningrequest.certificates.k8s.io/csr-bmnfd approved -certificatesigningrequest.certificates.k8s.io/csr-hwl89 approved -``` - -Finally, wait a minute or so and get all nodes: - -``` -$ oc get node -owide -``` - -If the Windows node registered itself successfully, it should appear in the list with a Ready status, ready to run Windows pods! 
- -## Next steps - -**Recommended - Security** - -- [Secure Calico component communications](../../../network-policy/comms/crypto-auth.mdx) -- [Secure pods with Calico network policy](../../../network-policy/get-started/calico-policy/calico-network-policy.mdx) -- If you are using {{prodname}} with Istio service mesh, get started here: [Enable application layer policy](../../../network-policy/istio/app-layer-policy.mdx) diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx deleted file mode 100644 index 49d7c3e5aa..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/quickstart.mdx +++ /dev/null @@ -1,525 +0,0 @@ ---- -description: Install Calico for Windows on a Kubernetes cluster for testing or development. ---- - -# Quickstart - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Install {{prodnameWindows}} on your Kubernetes cluster in approximately 5 minutes. - -## Concepts - -{{prodnameWindows}} is a hybrid implementation that requires a Linux cluster for {{prodname}} components and Linux workloads, and Windows nodes for Windows workloads. - -## Before you begin - -Review the [Linux requirements](../requirements.mdx) and the [{{prodnameWindows}} requirements](kubernetes/requirements.mdx). - -Before beginning the quickstart, setup a {{prodname}} cluster on Linux nodes and provision Windows machines. - -## How to - -- [Configure strict affinity for clusters using {{prodname}} networking](#configure-strict-affinity-for-clusters-using-calico-networking) -- [Install {{prodnameWindows}} manually](#install-calico-for-windows-manually) -- [Install {{prodnameWindows}} using HostProcess containers](#install-calico-for-windows-using-hostprocess-containers) -- [Configure installation parameters](#configure-installation-parameters) - -### Configure strict affinity for clusters using {{prodname}} networking - -For Linux control nodes using {{prodname}} networking, strict affinity must be set to `true`. -This is required to prevent Linux nodes from borrowing IP addresses from Windows nodes: - -```bash -kubectl patch ipamconfigurations default --type merge --patch='{"spec": {"strictAffinity": true}}' -``` - -:::note - -If the above command failed to find ipamconfigurations resource, you need to install Calico API server. Please refer to [installing the Calico API server](../../../operations/install-apiserver.mdx). - -::: - -### Install {{prodnameWindows}} manually - -The following steps install a Kubernetes cluster on a single Windows node with a Linux control node. - -- **Kubernetes VXLAN** - - The geeky details of what you get by default: - - - -- **Kubernetes BGP** - - The geeky details of what you get by default: - - - -- **EKS** - - The geeky details of what you get by default: - - - -- **AKS** - - The geeky details of what you get by default: - - - - - - -1. Ensure that BGP is disabled since you're using VXLAN. - If you installed Calico using operator, you can do this by: - - ```bash - kubectl patch installation default --type=merge -p '{"spec": {"calicoNetwork": {"bgp": "Disabled"}}}' - ``` - - If you installed Calico using the manifest then BGP is already disabled. - -1. Prepare the directory for Kubernetes files on Windows node. - - ```powershell - mkdir c:\k - ``` - -1. 
Copy the Kubernetes kubeconfig file from the control plane node (default, Location $HOME/.kube/config), to **c:\k\config**. - -1. Download the PowerShell script, **install-calico-windows.ps1**. - - ```powershell - Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 - ``` - -1. Install {{prodnameWindows}} for your datastore with using the default parameters or [customize installation parameters](#configure-installation-parameters). - The PowerShell script downloads {{prodnameWindows}} release binary, Kubernetes binaries, Windows utilities files, configures {{prodnameWindows}}, and starts the Calico service. - - **Kubernetes datastore (default)** - - ```powershell - c:\install-calico-windows.ps1 -KubeVersion ` - -ServiceCidr ` - -DNSServerIPs - ``` - - **etcd datastore** - - ```powershell - c:\install-calico-windows.ps1 -KubeVersion ` - -Datastore etcdv3 ` - -EtcdEndpoints ` - -EtcdTlsSecretName (default no etcd TLS secret is used) ` - -EtcdKey (default not using TLS) ` - -EtcdCert (default not using TLS) ` - -EtcdCaCert (default not using TLS) ` - -ServiceCidr ` - -DNSServerIPs - ``` - - :::note - - - You do not need to pass a parameter if the default value of the parameter is correct for your cluster. - - If your Windows nodes have multiple network adapters, you can configure the one used for VXLAN by editing `VXLAN_ADAPTER` in `{{rootDirWindows}}\config.ps1`, then restarting {{prodnameWindows}}. - - ::: - -1. Verify that the {{prodname}} services are running. - - ```powershell - Get-Service -Name CalicoNode - Get-Service -Name CalicoFelix - ``` - -1. Install and start kubelet/kube-proxy service. Execute following PowerShell script/commands. - - ```powershell - {{rootDirWindows}}\kubernetes\install-kube-services.ps1 - Start-Service -Name kubelet - Start-Service -Name kube-proxy - ``` - -1. Verify kubelet/kube-proxy services are running. - - ```powershell - Get-Service -Name kubelet - Get-Service -Name kube-proxy - ``` - - - - -1. Enable BGP service on Windows node (instead of VXLAN). - Install the RemoteAccess service using the following Powershell commands: - - ```powershell - Install-WindowsFeature RemoteAccess - Install-WindowsFeature RSAT-RemoteAccess-PowerShell - Install-WindowsFeature Routing - ``` - - Then restart the computer: - - ```powershell - Restart-Computer -Force - ``` - - before running: - - ```powershell - Install-RemoteAccess -VpnType RoutingOnly - ``` - - Sometimes the remote access service fails to start automatically after install. To make sure it is running, execute the following command: - - ```powershell - Start-Service RemoteAccess - ``` - -1. Prepare the directory for Kubernetes files on Windows node. - - ```powershell - mkdir c:\k - ``` - -1. Copy the Kubernetes kubeconfig file from the control plane node (default, Location $HOME/.kube/config), to **c:\k\config**. - -1. Download the PowerShell script, **install-calico-windows.ps1**. - - ```powershell - Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 - ``` - -1. Install {{prodnameWindows}} for your datastore with using the default parameters or [customize installation parameters](#configure-installation-parameters). - The PowerShell script downloads {{prodnameWindows}} release binary, Kubernetes binaries, Windows utilities files, configures {{prodnameWindows}}, and starts the Calico service. 
- - You do not need to pass a parameter if the default value of the parameter is correct for your cluster. - - **Kubernetes datastore (default)** - - ```powershell - c:\install-calico-windows.ps1 -KubeVersion ` - -ServiceCidr ` - -DNSServerIPs - ``` - - **etcd datastore** - - ```powershell - c:\install-calico-windows.ps1 -KubeVersion ` - -Datastore etcdv3 ` - -EtcdEndpoints ` - -EtcdTlsSecretName (default no etcd TLS secret is used) ` - -EtcdKey (default not using TLS) ` - -EtcdCert (default not using TLS) ` - -EtcdCaCert (default not using TLS) ` - -ServiceCidr ` - -DNSServerIPs - ``` - - :::note - - You do not need to pass a parameter if the default value of the parameter is correct for your cluster. - - ::: - -1. Verify that the {{prodname}} services are running. - - ```powershell - Get-Service -Name CalicoNode - Get-Service -Name CalicoFelix - ``` - -1. Install and start kubelet/kube-proxy service. Execute following PowerShell script/commands. - - ```powershell - {{rootDirWindows}}\kubernetes\install-kube-services.ps1 - Start-Service -Name kubelet - Start-Service -Name kube-proxy - ``` - -1. Verify kubelet/kube-proxy services are running. - - ```powershell - Get-Service -Name kubelet - Get-Service -Name kube-proxy - ``` - - - - -1. Ensure that a Windows instance role has permissions to get `namespaces` and to get `secrets` in the calico-system namespace (or kube-system namespace if you are using a non operator-managed {{prodname}} installation.) - One way to do this is by running the following commands to install the required permissions temporarily. Before running the commands, replace `` with the Kubernetes node name of the EKS Windows node, for example `ip-192-168-42-34.us-west-2.compute.internal`. - :::note - - If you are using a non operator-managed {{prodname}} installation, replace the namespace `calico-system` with `kube-system` in the commands below. - - ::: - - ```bash - kubectl create clusterrole calico-install-ns --verb=get --resource=namespace - kubectl create clusterrolebinding calico-install-ns --clusterrole=calico-install-ns --user=system:node: - kubectl create role calico-install-token --verb=get,list --resource=secrets --namespace calico-system - kubectl create rolebinding calico-install-token --role=calico-install-token --user=system:node: --namespace calico-system - ``` - -1. Prepare the directory for Kubernetes files on the Windows node. - - ```powershell - mkdir c:\k - ``` - -1. [Install kubectl](https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html#windows) and move the kubectl binary to **c:\k**. - -1. Download the PowerShell script, **install-calico-windows.ps1**. - - ```powershell - Invoke-WebRequest {{calicoReleasesURL}}/{{releaseTitle}}/install-calico-windows.ps1 -OutFile c:\install-calico-windows.ps1 - ``` - -1. Install {{prodnameWindows}} for your datastore with using the default parameters or [customize installation parameters](#configure-installation-parameters). - The PowerShell script downloads {{prodnameWindows}} release binary, Kubernetes binaries, Windows utilities files, configures {{prodnameWindows}}, and starts the Calico service. - - You do not need to pass a parameter if the default value of the parameter is correct for your cluster. 
- - **Kubernetes datastore (default)** - - ```powershell - c:\install-calico-windows.ps1 -ServiceCidr ` - -DNSServerIPs - ``` - - **etcd datastore** - - ```powershell - c:\install-calico-windows.ps1 -Datastore etcdv3 ` - -EtcdEndpoints ` - -ServiceCidr ` - -DNSServerIPs - ``` - - :::note - - You do not need to pass a parameter if the default value of the parameter is correct for your cluster. - - ::: - -1. Verify that the {{prodname}} services are running. - - ```powershell - Get-Service -Name CalicoNode - Get-Service -Name CalicoFelix - ``` - -1. Verify kubelet and kube-proxy services are running. - - ```powershell - Get-Service -Name kubelet - Get-Service -Name kube-proxy - ``` - -1. If you installed temporary RBAC in the first step, remove the permissions by running the following commands. - :::note - - If you are using a non operator-managed {{prodname}} installation, replace the namespace `calico-system` with `kube-system` in the commands below. - - ::: - - ```bash - kubectl delete clusterrolebinding calico-install-ns - kubectl delete clusterrole calico-install-ns - kubectl delete rolebinding calico-install-token --namespace calico-system - kubectl delete role calico-install-token --namespace calico-system - ``` - - - - -1. Register the `EnableAKSWindowsCalico` feature flag with the following Azure CLI command. - - ```bash - az feature register --namespace "Microsoft.ContainerService" --name "EnableAKSWindowsCalico" - ``` - -1. Wait until the `EnableAKSWindowsCalico` feature flag is registered successfully. Execute following CLI command to get current status of the feature. - - ```bash - az feature list -o table --query "[?contains(name, 'Microsoft.ContainerService/EnableAKSWindowsCalico')].{Name:name,State:properties.state}" - ``` - - Move to next step if the output from above command matches the following output. - - ```bash - Name State - ------------------------------------------------- ---------- - Microsoft.ContainerService/EnableAKSWindowsCalico Registered - ``` - -1. Refresh the registration of the `Microsoft.ContainerService` resource provider. Run the following command. - - ```bash - az provider register --namespace Microsoft.ContainerService - ``` - -1. Create the AKS cluster with these settings: `network-plugin` to `azure`, and `network-policy` to `calico`. For example, - - ```bash - az group create -n $your-resource-group -l $your-region - az aks create \ - --resource-group $your-resource-group \ - --name $your-cluster-name \ - --node-count 1 \ - --enable-addons monitoring \ - --windows-admin-username azureuser \ - --windows-admin-password $your-windows-password \ - --kubernetes-version 1.20.2 \ - --vm-set-type VirtualMachineScaleSets \ - --service-principal $your-service-principal \ - --client-secret $your-client-secret \ - --load-balancer-sku standard \ - --node-vm-size Standard_D2s_v3 \ - --network-plugin azure \ - --network-policy calico - ``` - -1. Add a Windows node pool. For example, - - ```bash - az aks nodepool add \ - --resource-group $your-resource-group \ - --cluster-name $your-cluster-name \ - --os-type Windows \ - --name $your-windows-node-pool-name \ - --node-count 1 \ - --kubernetes-version 1.20.2 \ - --node-vm-size Standard_D2s_v3 - ``` - - - - -Congratulations! You now have a Kubernetes cluster with {{prodnameWindows}} and a Linux control node. - -### Install {{prodnameWindows}} using HostProcess containers - -:::note - -This installation method is a tech preview and should not be used for production clusters. 
Upgrades from a tech preview version of this -installation method to the GA version might not be seamless. - -::: - -With Kubernetes v1.22, a new Windows container type called "HostProcess containers" can run directly on the host with access to the host network namespace, -storage, and devices. With this feature, {{prodnameWindows}} can now be installed and managed using Kubernetes resources such as Daemonsets and ConfigMaps, -instead of needing to configure and install {{prodnameWindows}} manually on each node. Using this installation method, the {{prodnameWindows}} -services are no longer registered on the host. Instead, the services are run directly within HostProcess containers. - -#### Requirements - -In addition to the [{{prodnameWindows}} requirements](kubernetes/requirements.mdx), -this installation method has [additional requirements](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/): - -- Kubernetes v1.22+ -- HostProcess containers support enabled: for v1.22, HostProcess containers support has to be [enabled](https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/#before-you-begin-version-check). For Kubernetes v1.23+, HostProcess containers are enabled by default. -- ContainerD 1.6.0+ -- The Windows nodes have joined the cluster - -To install ContainerD on the Windows node and configure the ContainerD service: - -```powershell -Invoke-WebRequest {{tmpScriptsURL}}/scripts/Install-Containerd.ps1 -OutFile c:\Install-Containerd.ps1 -c:\Install-Containerd.ps1 -ContainerDVersion 1.6.2 -CNIConfigPath "c:/etc/cni/net.d" -CNIBinPath "c:/opt/cni/bin" -``` - -If you have an existing {{prodnameWindows}} installation using the manual method, your Windows nodes may have already joined the cluster. - -To join a Windows node to a cluster provisioned with kubeadm: - -- Install kubeadm and kubelet binaries and install the kubelet service - -```powershell -Invoke-WebRequest {{tmpScriptsURL}}/scripts/PrepareNode.ps1 -OutFile c:\PrepareNode.ps1 -c:\PrepareNode.ps1 -KubernetesVersion v1.23.4 -ContainerRuntime ContainerD -``` - -- Run kubeadm on a control plane host and copy the join command - -```bash -kubeadm token create --print-join-command -``` - -- Edit the join command by appending `--cri-socket "npipe:////./pipe/containerd-containerd"` and update the kubeadm.exe path to `c:\k\kubeadm.exe`. - An example join command: - -``` -c:\k\kubeadm.exe join 172.16.101.139:6443 --token v8w2jt.jmc45acn85dbll1e --discovery-token-ca-cert-hash sha256:d0b7040a704d8deb805ba1f29f56bbc7cea8af6aafa78137a9338a62831739b4 --cri-socket "npipe:////./pipe/containerd-containerd" -``` - -- Run the join command on the Windows node. Shortly after it completes successfully, the Windows node will appear in `kubectl get nodes`. - The new node's status will be NotReady since the Calico CNI has not yet been installed. - -#### Migrating from {{prodnameWindows}} installed manually - -If your Windows nodes already have {{prodnameWindows}} installed using the manual installation method, you can continue this quickstart guide -to migrate to a manifest-based installation. This installation process will uninstall any existing {{prodnameWindows}} services and overwrite the {{prodnameWindows}} installation files with those included in the `calico/windows` image. If `kubelet` and `kube-proxy` were installed using `{{rootDirWindows}}\kubernetes\install-kube-services.ps1`, those services will updated in-place and remain installed. 
If those services were running, they are restarted so those services -will be updated in place and remain installed. - -:::note - -Before proceeding, take note of the configuration parameters in `{{rootDirWindows}}\config.ps1`. These configuration parameters will be needed during the install. - -::: - -#### Install - - - - - - - - - - - - - - -Congratulations! You now have a Kubernetes cluster with {{prodnameWindows}} and a Linux control node. - -### Configure installation parameters - -| **Parameter Name** | **Description** | **Default** | -| ------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------ | -| KubeVersion | Version of Kubernetes binaries to use. If the value is an empty string (default), the {{prodnameWindows}} installation script does not download Kubernetes binaries and run Kubernetes service. Use the default for managed public cloud. | "" | -| DownloadOnly | Download without installing {{prodnameWindows}}. Set to `yes` to manually install and configure {{prodnameWindows}}. For example, {{prodnameWindows}} the hard way. | no | -| Datastore | {{prodnameWindows}} datastore type [`kubernetes` or `etcdv3`] for reading endpoints and policy information. | kubernetes | -| EtcdEndpoints | Comma-delimited list of etcd connection endpoints. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379`. Valid only if `Datastore` is set to `etcdv3`. | "" | -| EtcdTlsSecretName | Name of a secret in `calico-system` namespace which contains `etcd-key`, `etcd-cert`, `etcd-ca` for automatically configuring TLS. Either use this or parameters `EtcdKey`, `EtcdCert`, `EtcdCaCert` below. Note: If you are not using operator-based installation, use namespace `kube-system`. | "" | -| EtcdKey | Path to key file for etcd TLS connection. | "" | -| EtcdCert | Path to certificate file for etcd TLS connection. | "" | -| EtcdCaCert | Path to CA certificate file for etcd TLS connection. | "" | -| ServiceCidr | Service IP range of the Kubernetes cluster. Not required for most managed Kubernetes clusters. Note: EKS has non-default value. | 10.96.0.0/12 | -| DNSServerIPs | Comma-delimited list of DNS service IPs used by Windows pod. Not required for most managed Kubernetes clusters. Note: EKS has a non-default value. | 10.96.0.10 | -| CalicoBackend | Calico backend network type (`vxlan` or `bgp`). If the value is an empty string (default), backend network type is auto detected. | "" | - -## Next steps - -You can now use the {{prodname}} Linux-based docs site for your documentation. Before you continue, review the [Limitations and known issues](limitations.mdx) to understand the features (and sections of documentation) that do not apply to Windows. diff --git a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx b/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx deleted file mode 100644 index 8d9d6e3bf5..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/kubernetes/windows-calico/troubleshoot.mdx +++ /dev/null @@ -1,202 +0,0 @@ ---- -description: Help for troubleshooting Calico for Windows issues in Calico this release. 
--- - -# Troubleshoot Calico for Windows - -## Useful troubleshooting commands - -**Examine the HNS network(s)** - -When using the {{prodname}} CNI plugin, each {{prodname}} IPAM block (or the single podCIDR in host-local IPAM mode) is represented as an HNS l2bridge network. Use the following command to inspect the networks. - -```powershell -ipmo -DisableNameChecking {{rootDirWindows}}\libs\hns\hns.psm1 -Get-HNSNetwork -``` - -**Examine pod endpoints** - -Use the following command to view the HNS endpoints on the system. There should be one HNS endpoint per pod networked with {{prodname}}: - -```powershell -ipmo -DisableNameChecking {{rootDirWindows}}\libs\hns\hns.psm1 -Get-HNSEndpoint -``` - -## Troubleshoot - -### kubectl exec fails with timeout for Windows pods - -Ensure that the Windows firewall (and any network firewall or cloud security group) allows traffic to the host on port 10250. - -### kubelet fails to register, complains of node not found in logs - -This can be caused by a mismatch between a cloud provider (such as the AWS cloud provider) and the configuration of the node. For example, the AWS cloud provider requires that the node has a nodename matching its private domain name. - -### After initializing {{prodnameWindows}}, AWS metadata server is no longer reachable - -This is a known Windows issue that Microsoft is working on. The route to the metadata server is lost when the vSwitch is created. As a workaround, manually add the route back by running: - -```powershell -New-NetRoute -DestinationPrefix 169.254.169.254/32 -InterfaceIndex <index> - ``` - -Where `<index>` is the index of the "vEthernet (Ethernet 2)" device as shown by - -```powershell -Get-NetAdapter -``` - -### Installation stalls at "Waiting for {{prodname}} initialization to finish" - -This can be caused by Windows' execution protection feature. Exit the install using Ctrl-C, unblock the scripts, run `uninstall-calico.ps1`, followed by `install-calico.ps1`. - -### Windows Server 2019 insider preview: after rebooting a node, {{prodnameWindows}} fails to start and the tigera-node.err.log file contains errors - -After rebooting the Windows node, pods fail to schedule, and the kubelet log has CNI errors like "timed out waiting for interface matching the management IP (169.254.57.5) of network" (where the IP address may vary but will always be a 169.254.x.x address). To work around this: - -- Stop and then start {{prodnameWindows}} using the `stop-calico.ps1` and `start-calico.ps1` scripts -- Sometimes the HNS network picks up a temporary self-assigned address at start-of-day and it does not get refreshed when the correct IP becomes known. Rebooting the node a second time often resolves the problem. - -### Invoke-WebRequest fails with TLS errors - -The error, "The request was aborted: Could not create SSL/TLS secure channel", often means that Windows does not support TLS v1.2 (which is required by many websites) by default. To enable TLS v1.2, run the following command: - -```powershell -[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 -``` - -### Kubelet persistently fails to contact the API server - -If kubelet is already running when {{prodnameWindows}} is installed, the creation of the container vSwitch can cause kubelet to lose its connection and then persistently fail to reconnect to the API server. -To resolve this, restart kubelet after installing {{prodnameWindows}}.
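For example, if `kubelet` was installed as a Windows service named `kubelet` (an assumption that matches the kubelet service setup used elsewhere in this guide), the restart is a single command:

```powershell
# Restart kubelet now that the Calico vSwitch exists, then confirm it is running.
Restart-Service -Name kubelet
Get-Service -Name kubelet
```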
-
-### No connectivity between pods on Linux and Windows nodes
-
-If using AWS, check that the source/dest check is disabled on the interfaces assigned to your nodes. This allows nodes to forward traffic on behalf of local pods.
-In AWS, the "Change Source/Dest. Check" option can be found on the Actions menu for a selected network interface.
-
-If using {{prodname}} networking, check that the {{prodname}} IP pool you are using has IPIP mode disabled (set to "Never"). IPIP is not supported on Windows. To check the IP pool, you can use `calicoctl`:
-
-```bash
-calicoctl get ippool -o yaml
-```
-
-Example output of an IP pool with IPIP disabled:
-
-```yaml
-apiVersion: projectcalico.org/v3
-items:
-  - apiVersion: projectcalico.org/v3
-    kind: IPPool
-    metadata:
-      creationTimestamp: 2018-11-26T15:37:39Z
-      name: default-ipv4-ippool
-      resourceVersion: '172'
-      uid: 34db7316-f191-11e8-ad7d-02850eebe6c4
-    spec:
-      blockSize: 26
-      cidr: 192.168.0.0/16
-      disabled: true
-      ipipMode: Never
-      natOutgoing: true
-```
-
-### Felix log error: "Failed to create datastore client"
-
-If the error includes `loading config file ""`, follow the instructions in
-[Set environment variables](kubernetes/standard.mdx#install-calico-and-kubernetes-on-windows-nodes) to update the `KUBECONFIG` environment variable to the path of your kubeconfig file.
-
-### Felix starts, but does not output logs
-
-By default, Felix waits to connect to the datastore before logging (in case the datastore configuration intentionally disables logging). To start logging at startup, update the [FELIX_LOGSEVERITYSCREEN environment variable](../../../reference/felix/configuration.mdx#general-configuration) to "info" or "debug" level.
-
-### {{prodname}} BGP mode: connectivity issues, Linux calico/node pods report unready
-
-Check the detailed health output that shows which health check failed:
-
-```
-kubectl describe pod -n calico-system
-```
-
-:::note
-
-Use namespace `kube-system` instead of `calico-system` if your Calico installation is not operator-managed.
-
-:::
-
-If the health check reports a BGP peer failure, check that the IP address of the peer is either an
-expected IP of a node or an external BGP peer. If the IP of the failed peering is a Windows node:
-
-- Check that the node is up and reachable over IP.
-- Check that the RemoteAccess service is installed and running:
-
-  ```powershell
-  Get-Service | ? Name -EQ RemoteAccess
-  ```
-
-- Check the logs for the confd service in the configured log directory for errors
-  (default {{rootDirWindows}}\logs).
-
-**Examine BGP state on a Windows host**
-
-The Windows BGP router exposes its configuration and state as PowerShell cmdlets.
-
-**To show BGP peers**:
-
-```powershell
-Get-BgpPeer
-```
-
-Example output:
-
-```
-PeerName           LocalIPAddress PeerIPAddress PeerASN OperationMode ConnectivityStatus
---------           -------------- ------------- ------- ------------- ------------------
-Mesh_172_20_48_43  172.20.55.101  172.20.48.43  64512   Mixed         Connected
-Mesh_172_20_51_170 172.20.55.101  172.20.51.170 64512   Mixed         Connected
-Mesh_172_20_54_3   172.20.55.101  172.20.54.3   64512   Mixed         Connected
-Mesh_172_20_58_252 172.20.55.101  172.20.58.252 64512   Mixed         Connected
-```
-
-For an established peering, the ConnectivityStatus column should be "Connected".
-
-**To examine routes learned from other hosts**:
-
-```powershell
-Get-BgpRouteInformation -Type all
-```
-
-Example output:
-
-```
-DestinationNetwork NextHop       LearnedFromPeer    State LocalPref MED
------------------- -------       ---------------    ----- --------- ---
-10.243.128.192/26  172.20.58.252 Mesh_172_20_58_252 Best  100
-10.244.115.128/26  172.20.48.43  Mesh_172_20_48_43  Best  100
-10.244.128.192/26  172.20.58.252 Mesh_172_20_58_252 Best  100
-```
-
-For active routes, the State should show as "Best". Routes with State equal to "Unresolved"
-indicate that the BGP router could not resolve a route to the peer and the route will not be
-used. This can occur if the networking state changes after the BGP router is started;
-restarting the BGP router may solve the problem:
-
-```powershell
-Restart-Service RemoteAccess
-```
-
-To see the routes being exported by this host:
-
-```powershell
-(Get-BgpCustomRoute).Network
-```
-
-Example output:
-
-```
-10.243.214.152/29
-10.243.214.160/29
-10.243.214.168/29
-10.244.42.0/26
-```
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx
deleted file mode 100644
index 5057d063c4..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico networking and network policy for OpenStack.
-hide_table_of_contents: true
----
-
-# OpenStack
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx
deleted file mode 100644
index bc8d09ba65..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/devstack.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-description: Quickstart to show connectivity between DevStack and Calico.
----
-
-# DevStack
-
-The networking-calico project provides a DevStack plugin. The following
-instructions explain how to set up a single-node or multi-node DevStack/{{prodname}}
-system, and then how to see {{prodname}} connectivity in action.
-
-:::note
-
-networking-calico includes a
-[shell script](https://github.com/projectcalico/calico/blob/master/networking-calico/devstack/bootstrap.sh)
-that implements the following setup instructions. You are welcome to use it,
-but we recommend that you read the following description first and
-briefly review the script's code, so that you understand what the
-script does.
-
-:::
-
-1. Download DevStack as usual.
-
-2. Add the following to your DevStack local.conf file:
-
-   ```bash
-   enable_plugin networking-calico https://github.com/projectcalico/networking-calico
-   ```
-
-3. Run `stack.sh`.
-
-4. Create a shared, routed network with an IPv4 subnet:
-
-   ```bash
-   . openrc admin admin
-   neutron net-create --shared --provider:network_type local calico
-   neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0/24
-   ```
-
-5. Ensure that IPv4 and IPv6 forwarding are enabled:
-
-   ```bash
-   sysctl -w net.ipv4.ip_forward=1
-   sysctl -w net.ipv6.conf.all.forwarding=1
-   ```
-
-## Multi-node setup
-
-This plugin also supports additional compute-only nodes. 
So, in the system as
-a whole, there can then be:
-
-- one node with both controller and compute function
-
-- any number of additional nodes with just compute function.
-
-The first node should be prepared as described above. Then, for each
-additional compute node:
-
-- set and export the SERVICE_HOST environment variable to the name of the
-  controller node; for example:
-
-  ```bash
-  export SERVICE_HOST=calico-vm18
-  ```
-
-- follow the steps above, except for the network and subnet creations, to
-  install and set up DevStack with {{prodname}} on that node.
-
-## Demonstrating {{prodname}} connectivity
-
-Then, to see {{prodname}} connectivity in action:
-
-1. Launch instances attached to the 'calico' network.
-
-2. Use `ip route` to observe per-instance routes created by the {{prodname}} agent.
-
-3. Log into each instance (e.g., through the Horizon console) and verify that it can
-   ping the others.
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx
deleted file mode 100644
index 3e4dfe81db..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico on OpenStack
-hide_table_of_contents: true
----
-
-# Install Calico on OpenStack
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx
deleted file mode 100644
index 87b00ebd33..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/overview.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-description: Choose a method for installing Calico for OpenStack.
----
-
-# Calico on OpenStack
-
-There are many ways to try out {{prodname}} with OpenStack. We provide instructions for the
-following methods:
-
-- [Package-based install for Ubuntu](ubuntu.mdx)
-
-- [RPM-based install for Red Hat Enterprise Linux (RHEL)](redhat.mdx)
-
-- [DevStack](devstack.mdx) (for development purposes only—not recommended for production!)
-
-In all cases except DevStack, you will need at least two servers to
-get going: one OpenStack controller and one or more OpenStack compute nodes.
diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx
deleted file mode 100644
index da2e8ba686..0000000000
--- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/redhat.mdx
+++ /dev/null
@@ -1,287 +0,0 @@
----
-description: Install Calico on OpenStack, Red Hat Enterprise Linux nodes.
----
-
-# Red Hat Enterprise Linux
-
-import OpenStackEtcdAuth from '@site/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx';
-
-These instructions will take you through a first-time install of
-{{prodname}}. If you are upgrading an existing system, please see
-[Upgrading {{prodname}} on OpenStack](../../../operations/upgrading/openstack-upgrade.mdx)
-instead.
-
-There are two sections to the install: adding {{prodname}} to OpenStack
-control nodes, and adding {{prodname}} to OpenStack compute nodes. 
Follow -the [Common steps](#common-steps) on each node before moving on to the specific -instructions in the control and compute sections. If you want to create a -combined control and compute node, work through all three sections. - -## Before you begin - -- Ensure that you meet the [requirements](../requirements.mdx). -- Confirm that you have SSH access to and root privileges on one or more Red Hat - Enterprise Linux (RHEL) hosts. -- Make sure you have working DNS between the RHEL hosts (use `/etc/hosts` if you - don't have DNS on your network). -- [Install OpenStack with Neutron and ML2 networking](http://docs.openstack.org) - on the RHEL hosts. - -## Common steps - -Some steps need to be taken on all machines being installed with {{prodname}}. -These steps are detailed in this section. - -1. [Add the EPEL repository](https://fedoraproject.org/wiki/EPEL). You may - have already added this to install OpenStack. - -1. Configure the {{prodname}} repository: - - ```bash - cat > /etc/yum.repos.d/calico.repo <` is the IP address of the etcd - server. - - ``` - [calico] - etcd_host = - ``` - -## Control node install - -On each control node, perform the following steps: - -1. Delete all configured OpenStack state, in particular any instances, - routers, subnets and networks (in that order) created by the install - process referenced above. You can do this using the web dashboard or - at the command line. - - :::tip - - The Admin and Project sections of the web dashboard both - have subsections for networks and routers. Some networks may - need to be deleted from the Admin section. - - ::: - - :::caution - - The {{prodname}} install will fail if incompatible state is - left around. - - ::: - -1. Edit `/etc/neutron/neutron.conf`. In the `[DEFAULT]` section, find - the line beginning with `core_plugin`, and change it to read `core_plugin = calico`. Also remove any existing setting for `service_plugins`. - -1. Install the `calico-control` package: - - ``` - yum install -y calico-control - ``` - -1. Restart the neutron server process: - - ``` - service neutron-server restart - ``` - -## Compute node install - -On each compute node, perform the following steps: - -1. Open `/etc/nova/nova.conf` and remove the line from the `[DEFAULT]` - section that reads: - - ```conf - linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver - ``` - - Remove the lines from the `[neutron]` section setting - `service_neutron_metadata_proxy` or `service_metadata_proxy` to - `True`, if there are any. Additionally, if there is a line setting - `metadata_proxy_shared_secret`, comment that line out as well. - - Restart nova compute. - - ```bash - service openstack-nova-compute restart - ``` - - If this node is also a controller, additionally restart nova-api. - - ```bash - service openstack-nova-api restart - ``` - -1. If they're running, stop the Open vSwitch services. - - ```bash - service neutron-openvswitch-agent stop - service openvswitch stop - ``` - - Then, prevent the services running if you reboot. - - ```bash - chkconfig openvswitch off - chkconfig neutron-openvswitch-agent off - ``` - - Then, on your control node, run the following command to find the - agents that you just stopped. - - ``` - neutron agent-list - ``` - - For each agent, delete them with the following command on your - control node, replacing `` with the ID of the agent. - - ``` - neutron agent-delete - ``` - -1. Install Neutron infrastructure code on the compute host. - - ``` - yum install -y openstack-neutron - ``` - -1. 
Edit `/etc/neutron/neutron.conf`. In the `[oslo_concurrency]` section, - ensure that the `lock_path` variable is uncommented and set as follows. - - ``` - # Directory to use for lock files. For security, the specified directory should - # only be writable by the user running the processes that need locking. - # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, - # a lock path must be set. - lock_path = $state_path/lock - ``` - -1. Stop and disable the Neutron DHCP agent, and install the - {{prodname}} DHCP agent (which uses etcd, allowing it to scale to higher - numbers of hosts). - - ``` - service neutron-dhcp-agent stop - chkconfig neutron-dhcp-agent off - yum install -y calico-dhcp-agent - ``` - -1. Stop and disable any other routing/bridging agents such as the L3 - routing agent or the Linux bridging agent. These conflict - with {{prodname}}. - - ```bash - service neutron-l3-agent stop - chkconfig neutron-l3-agent off - ``` - - Repeat for bridging agent and any others. - -1. If this node is not a controller, install and start the Nova - Metadata API. This step is not required on combined compute and - controller nodes. - - ```bash - yum install -y openstack-nova-api - service openstack-nova-metadata-api restart - chkconfig openstack-nova-metadata-api on - ``` - -1. Install the BIRD BGP client. - - ```bash - yum install -y bird bird6 - ``` - -1. Install the `calico-compute` package. - - ```bash - yum install -y calico-compute - ``` - -1. Configure BIRD. By default {{prodname}} assumes that you will deploy a - route reflector to avoid the need for a full BGP mesh. To this end, it - includes configuration scripts to prepare a BIRD config file with a single - peering to the route reflector. If that's correct for your network, you can - run either or both of the following commands. - - For IPv4 connectivity between compute hosts: - - ```bash - calico-gen-bird-conf.sh - ``` - - And/or for IPv6 connectivity between compute hosts: - - ```bash - calico-gen-bird6-conf.sh - ``` - - You will also need to [configure your route reflector to allow connections from the compute node as a route reflector client](../../../networking/configuring/bgp.mdx) -. - - If you _are_ configuring a full BGP mesh you need to handle the BGP - configuration appropriately on each compute host. The scripts above can be - used to generate a sample configuration for BIRD, by replacing the - `` with the IP of one other compute host—this will - generate the configuration for a single peer connection, which you can - duplicate and update for each compute host in your mesh. - - To maintain connectivity between VMs if BIRD crashes or is upgraded, - configure BIRD graceful restart. Edit the systemd unit file - /usr/lib/systemd/system/bird.service (and bird6.service for IPv6): - - - Add `-R` to the end of the `ExecStart` line. - - Add `KillSignal=SIGKILL` as a new line in the `[Service]` section. - - Run `systemctl daemon-reload` to tell systemd to reread that file. - - Ensure that BIRD (and/or BIRD 6 for IPv6) is running and starts on - reboot. - - ```bash - service bird restart - service bird6 restart - chkconfig bird on - chkconfig bird6 on - ``` - -1. Create `/etc/calico/felix.cfg` with the following content, where `` is the IP - address of the etcd server. - - ```conf - [global] - DatastoreType = etcdv3 - EtcdAddr = :2379 - ``` - -1. Restart the Felix service. 
- - ``` - service calico-felix restart - ``` - - diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx deleted file mode 100644 index bc040a0a06..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/ubuntu.mdx +++ /dev/null @@ -1,270 +0,0 @@ ---- -description: Install Calico on OpenStack, Ubuntu nodes. ---- - -# Ubuntu - -import OpenStackEtcdAuth from '@site/calico_versioned_docs/version-3.25/_includes/content/_openstack-etcd-auth.mdx'; - -These instructions will take you through a first-time install of -{{prodname}}. If you are upgrading an existing system, please see -[Upgrading {{prodname}} on OpenStack](../../../operations/upgrading/openstack-upgrade.mdx) -instead. - -There are two sections to the install: adding {{prodname}} to OpenStack -control nodes, and adding {{prodname}} to OpenStack compute nodes. Follow -the [Common steps](#common-steps) on each node before moving on to the specific -instructions in the control and compute sections. If you want to create a -combined control and compute node, work through all three sections. - -## Before you begin - -- Ensure that you meet the [requirements](../requirements.mdx). -- Confirm that you have SSH access to and root privileges on one or more Ubuntu hosts - (your OpenStack compute or control nodes). -- [Install OpenStack with Neutron and ML2 networking](http://docs.openstack.org) - on the Ubuntu hosts. - -## Common steps - -Some steps need to be taken on all machines being installed with {{prodname}}. -These steps are detailed in this section. - -1. Configure APT to use the {{prodname}} PPA: - - ```bash - add-apt-repository ppa:project-calico/{{ ppa_repo_name }} - ``` - -1. Add the official BIRD PPA. This PPA contains - fixes to BIRD that are not yet available in Ubuntu. To add the PPA, run: - - ```bash - add-apt-repository ppa:cz.nic-labs/bird - ``` - - :::tip - - If the above command fails with error - `'ascii' codec can't decode byte`, try running the command with a - UTF-8 enabled locale: - `LC_ALL=en_US.UTF-8 add-apt-repository ppa:cz.nic-labs/bird`. - - ::: - -1. Update your package manager on each machine: - - ```bash - apt-get update - ``` - -1. Install the `etcd3-gateway` Python package. A current copy of that code is - needed by {{prodname}}'s OpenStack driver and DHCP agent, so you - should install it with `pip3`. - - ```bash - apt-get install -y python3-pip - pip3 install git+https://github.com/dims/etcd3-gateway.git@5a3157a122368c2314c7a961f61722e47355f981 - ``` - -1. Edit `/etc/neutron/neutron.conf`. Add a `[calico]` section with - the following content, where `` is the IP address of the etcd - server. - - ``` - [calico] - etcd_host = - ``` - -## Control node install - -On each control node, perform the following steps. - -1. Delete all configured OpenStack state, in particular any instances, - routers, subnets and networks (in that order) created by the install - process referenced above. You can do this using the web dashboard or - at the command line. - - :::tip - - The Admin and Project sections of the web dashboard both - have subsections for networks and routers. Some networks may - need to be deleted from the Admin section. - - ::: - - :::caution - - The {{prodname}} install will fail if incompatible state is - left around. - - ::: - -1. Run `apt-get upgrade` and `apt-get dist-upgrade`. 
These commands - bring in {{prodname}}-specific updates to the OpenStack packages and - to `dnsmasq`. - -1. Edit `/etc/neutron/neutron.conf`. In the `[DEFAULT]` section, find - the line beginning with `core_plugin`, and change it to read `core_plugin = calico`. Also remove any existing setting for `service_plugins`. - -1. Install the `calico-control` package: - - ```bash - apt-get install -y calico-control - ``` - -1. Restart the Neutron server process: - - ```bash - service neutron-server restart - ``` - -## Compute node install - -On each compute node, perform the following steps: - -1. Open `/etc/nova/nova.conf` and remove the line from the `[DEFAULT]` - section that reads: - - ```bash - linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver - ``` - - Remove the lines from the `[neutron]` section setting - `service_neutron_metadata_proxy` or `service_metadata_proxy` to - `True`, if there are any. - - Restart nova compute. - - ```bash - service nova-compute restart - ``` - -1. If they're running, stop the Open vSwitch services: - - ```bash - service openvswitch-switch stop - service neutron-plugin-openvswitch-agent stop - ``` - - Then, prevent the services running if you reboot: - - ```bash - sh -c "echo 'manual' > /etc/init/openvswitch-switch.override" - sh -c "echo 'manual' > /etc/init/openvswitch-force-reload-kmod.override" - sh -c "echo 'manual' > /etc/init/neutron-plugin-openvswitch-agent.override" - ``` - - Then, on your control node, run the following command to find the - agents that you just stopped: - - ```bash - neutron agent-list - ``` - - For each agent, delete them with the following command on your - control node, replacing `` with the ID of the agent: - - ```bash - neutron agent-delete - ``` - -1. Install some extra packages: - - ```bash - apt-get install -y neutron-common neutron-dhcp-agent nova-api-metadata - ``` - -1. Run `apt-get upgrade` and `apt-get dist-upgrade`. These commands - bring in {{prodname}}-specific updates to the OpenStack packages and - to `dnsmasq`. - -1. Edit `/etc/neutron/neutron.conf`. In the `[oslo_concurrency]` section, - ensure that the `lock_path` variable is uncommented and set as follows. - - ``` - # Directory to use for lock files. For security, the specified directory should - # only be writable by the user running the processes that need locking. - # Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, - # a lock path must be set. - lock_path = $state_path/lock - ``` - -1. Install the {{prodname}} DHCP agent (which uses etcd, allowing - it to scale to higher numbers of hosts) and disable the Neutron-provided - one: - - ``` - service neutron-dhcp-agent stop - echo manual | tee /etc/init/neutron-dhcp-agent.override - apt-get install -y calico-dhcp-agent - ``` - -1. Install the `calico-compute` package: - - ```bash - apt-get install -y calico-compute - ``` - - This step may prompt you to save your iptables rules to make them - persistent on restart -- hit yes. - -1. Configure BIRD. By default {{prodname}} assumes that you will deploy a - route reflector to avoid the need for a full BGP mesh. To this end, it - includes configuration scripts to prepare a BIRD config file with a single - peering to the route reflector. If that's correct for your network, you can - run either or both of the following commands. 
- - For IPv4 connectivity between compute hosts: - - ```bash - calico-gen-bird-conf.sh - ``` - - And/or for IPv6 connectivity between compute hosts: - - ```bash - calico-gen-bird6-conf.sh - ``` - - You will also need to [configure your route reflector to allow connections from the compute node as a route reflector client](../../../networking/configuring/bgp.mdx) -. - - If you _are_ configuring a full BGP mesh you need to handle the BGP - configuration appropriately on each compute host. The scripts above can be - used to generate a sample configuration for BIRD, by replacing the - `` with the IP of one other compute host -- this will - generate the configuration for a single peer connection, which you can - duplicate and update for each compute host in your mesh. - - To maintain connectivity between VMs if BIRD crashes or is upgraded, - configure BIRD graceful restart: - - - Add `-R` to `BIRD_ARGS` in /etc/bird/envvars (you may need to - uncomment this option). - - Edit the upstart jobs /etc/init/bird.conf and bird6.conf (if - you're using IPv6), and add the following script to it. - - ```bash - pre-stop script - PID=`status bird | egrep -oi '([0-9]+)$' | head -n1` - kill -9 $PID - end script - ``` - -1. Create `/etc/calico/felix.cfg` with the following content, where `` is the IP - address of the etcd server. - - ```conf - [global] - DatastoreType = etcdv3 - EtcdAddr = :2379 - ``` - -1. Restart the Felix service. - - ```bash - service calico-felix restart - ``` - - diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx deleted file mode 100644 index 6d88e6718a..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/openstack/installation/verification.mdx +++ /dev/null @@ -1,175 +0,0 @@ ---- -description: Quick steps to test that your Calico-based OpenStack deployment is running correctly. ---- - -# Verify your deployment - -This document takes you through the steps you can perform to verify that -a {{prodname}}-based OpenStack deployment is running correctly. - -## Prerequisites - -This document requires you have the following things: - -- SSH access to the nodes in your {{prodname}}-based OpenStack deployment. -- Access to an administrator account on your {{prodname}}-based - OpenStack deployment. - -## Procedure - -Begin by creating several instances on your OpenStack deployment using -your administrator account. Confirm that these instances all launch and -correctly obtain IP addresses. - -You'll want to make sure that your new instances are evenly striped -across your hypervisors. On your control node, run: - -```bash -nova list --fields host -``` - -Confirm that there is an even spread across your compute nodes. If there -isn't, it's likely that an error has happened in either nova or {{prodname}} -on the affected compute nodes. Check the logs on those nodes for more -logging, and report your difficulty on the mailing list. - -Now, SSH into one of your compute nodes. We're going to verify that the -FIB on the compute node has been correctly populated by {{prodname}}. To do -that, run the `route` command. 
You'll get output something like this:
-
-```
-Kernel IP routing table
-Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
-default         net-vl401-hsrp- 0.0.0.0         UG    0      0        0 eth0
-10.65.0.0       *               255.255.255.0   U     0      0        0 ns-b1163e65-42
-10.65.0.103     npt06.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.104     npt09.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.105     *               255.255.255.255 UH    0      0        0 tap242f8163-08
-10.65.0.106     npt09.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.107     npt07.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.108     npt08.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.109     npt07.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.110     npt06.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.111     npt08.datcon.co 255.255.255.255 UGH   0      0        0 eth0
-10.65.0.112     *               255.255.255.255 UH    0      0        0 tap3b561211-dd
-link-local      *               255.255.0.0     U     1000   0        0 eth0
-172.18.192.0    *               255.255.255.0   U     0      0        0 eth0
-```
-
-You should expect to see one route for each of the VM IP addresses in this
-table. For VMs on other compute nodes, you should see that compute
-node's IP address (or domain name) as the `gateway`. For VMs on this
-compute node, you should see `*` as the `gateway`, and the tap interface
-for that VM in the `Iface` field. As long as routes are present to all
-VMs, the FIB has been configured correctly. If any VMs are missing from
-the routing table, you'll want to verify the state of the BGP
-connection(s) from the compute node hosting those VMs.
-
-Having confirmed the FIB is present and correct, open the console for
-one of the VM instances you just created. Confirm that the machine has
-external connectivity by pinging `google.com` (or any other host you are
-confident is routable and that will respond to pings). Additionally,
-confirm it has internal connectivity by pinging the other instances
-you've created (by IP).
-
-If all of these tests behave correctly, your {{prodname}}-based OpenStack
-deployment is in good shape.
-
-## Troubleshooting
-
-If you find that none of the advice below solves your problems, please
-use our diagnostics gathering script to generate diagnostics, and then
-raise a GitHub issue against our repository. To generate the diags, run:
-
-```bash
-/usr/bin/calico-diags
-```
-
-### VMs cannot DHCP
-
-This can happen if iptables is configured with a default DROP
-behaviour on the INPUT or FORWARD chains. You can test this by running
-`iptables -L -t filter` and checking the output. You should see
-something that looks a bit like this:
-
-```
-Chain INPUT (policy ACCEPT)
-target     prot opt source               destination
-ACCEPT     all  --  anywhere             anywhere             state RELATED,ESTABLISHED
-ACCEPT     icmp --  anywhere             anywhere
-ACCEPT     all  --  anywhere             anywhere
-ACCEPT     tcp  --  anywhere             anywhere             state NEW tcp dpt:ssh
-REJECT     all  --  anywhere             anywhere             reject-with icmp-host-prohibited
-
-Chain FORWARD (policy ACCEPT)
-target     prot opt source               destination
-REJECT     all  --  anywhere             anywhere             reject-with icmp-host-prohibited
-
-Chain OUTPUT (policy ACCEPT)
-target     prot opt source               destination
-```
-
-The important sections are `Chain INPUT` and `Chain FORWARD`. Each of
-those needs to have a policy of `ACCEPT`. In some systems, this policy
-may be set to `DENY`. To change it, run `iptables -P <chain> ACCEPT`,
-replacing `<chain>` with either `INPUT` or `FORWARD`.
-
-Note that doing this may be considered a security risk in some networks.
-A future {{prodname}} enhancement will remove the requirement to perform this
-step.
-
-### Routes are missing in the FIB
- -If routes to some VMs aren't present when you run `route`, this suggests -that your BGP sessions are not functioning correctly. Your BGP daemon -should have either an interactive console or a log. Open the relevant -one and check that all of your BGP sessions have come up appropriately -and are replicating routes. If you're using a full mesh configuration, -confirm that you have configured BGP sessions with _all_ other {{prodname}} -nodes. - -### VMs Cannot Ping Non-VM IPs - -Assuming all the routes are present in the FIB (see above), this most -commonly happens because the gateway is not configured with routes to -the VM IP addresses. To get full {{prodname}} functionality the gateway should -also be a BGP peer of the compute nodes (or the route reflector). - -Confirm that your gateway has routes to the VMs. Assuming it does, make -sure that your gateway is also advertising those routes to its external -peers. It may do this using eBGP, but it may also be using some other -routing protocol. - -### VMs Cannot Ping Other VMs - -Before continuing, confirm that the two VMs are in security groups that -allow inbound traffic from each other (or are both in the same security -group which allows inbound traffic from itself). Traffic will not be -routed between VMs that do not allow inbound traffic from each other. - -Assuming that the security group configuration is correct, confirm that -the machines hosting each of the VMs (potentially the same machine) have -routes to both VMs. If they do not, check out the troubleshooting -section [above](#routes-are-missing-in-the-fib). - -### Web UI Shows Error Boxes Saying "Error: Unable to get quota info" and/or "Error: Unable to get volume limit" - -This is likely a problem encountered with mapping devices in `cinder`, -OpenStack's logical volume management component. Many of these can be -resolved by restarting `cinder`. - -```bash -service cinder-volume restart -service cinder-scheduler restart -service cinder-api restart -``` - -### Cannot create instances, error log says "could not open /dev/net/tun: Operation not permitted" - -This is caused by having not restarted libvirt after you add lines to -the end of `/etc/libvirt/qemu.conf`. This can be fixed by either -rebooting your entire system or running: - -```bash -service libvirt-bin restart -``` diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx deleted file mode 100644 index c483931df3..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/openstack/overview.mdx +++ /dev/null @@ -1,42 +0,0 @@ ---- -description: Review the Calico components used in an OpenStack deployment. ---- - -# Calico for OpenStack - -{{prodname}}'s integration with OpenStack consists of the following pieces. - -- etcd, providing a distributed key/value database that is accessible from all - compute hosts and Neutron servers. - -- Felix (the {{prodname}} agent) running on each compute host. Felix reads - information from etcd that specifies workloads and their properties (IP - addresses, security etc.), and implements that connectivity and security for - them. Felix also reports its own agent status, and the programming status - for each workload, through etcd. - -- BIRD, also running on each compute host, to propagate local workload routes - to other compute hosts and infrastructure routers. 
- -- The {{prodname}} driver for Neutron, that runs as part of the Neutron server on - each machine where the Neutron server runs. (There can be just one Neutron - server, but typically there are more, to provide higher availability.) This - driver handles OpenStack network, subnet, instance and security operations - and translates them into equivalent etcd data for Felix to implement. It - also reads the agent and per-port status information that Felix writes into - etcd, and reports this into the Neutron DB. - -- The {{prodname}} DHCP agent, running on each compute host, that configures and - launches Dnsmasq instances to provide DHCP for the locally hosted workloads. - Architecturally this fills the same role as the reference Neutron DHCP agent; - the key difference is that it gets its information from Etcd instead of by - RPC from the Neutron server, as we have found this to be more scalable. - -The Etcd, Felix and BIRD pieces are the same as in other {{prodname}} integrations, -and so independent of OpenStack. The {{prodname}} Neutron driver and DHCP agent are -specific to OpenStack, and are provided by the [networking-calico](https://github.com/projectcalico/networking-calico/) project. - -From an OpenStack point of view, networking-calico is just one of many possible -Neutron drivers that provide connectivity between instances (VMs) as specified -by the Neutron API. Refer to [{{prodname}}'s interpretation of Neutron API calls](../../networking/openstack/neutron-api.mdx) for more detail about the -parts of the Neutron API that the networking-calico provides. diff --git a/calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx b/calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx deleted file mode 100644 index b87debe924..0000000000 --- a/calico_versioned_docs/version-3.25/getting-started/openstack/requirements.mdx +++ /dev/null @@ -1,20 +0,0 @@ ---- -description: Requirements for installing Calico on OpenStack nodes. ---- - -# System requirements - - - -## OpenStack requirements - -The Calico Neutron driver is written in Python 3 and so requires an OpenStack release that -runs with Python 3. Subject to that, we aim to develop and maintain the Neutron driver -for {{prodname}} (networking-calico) so that its master code works with OpenStack -master or any previous Python 3 release, on any operating system, independently of the -deployment mechanism that is used to install it. - -However, we recommend using OpenStack Ussuri or later, and our active support and testing -of {{prodname}} {{version}} with OpenStack is with Ussuri. - - diff --git a/calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx b/calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx deleted file mode 100644 index 0cad41fbe2..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/adopt-zero-trust.mdx +++ /dev/null @@ -1,296 +0,0 @@ ---- -description: Best practices to adopt a zero trust network model to secure workloads and hosts. Learn 5 key requirements to control network access for cloud-native strategy. ---- - -# Adopt a zero trust network model for security - -## Big picture - -Adopting a zero trust network model is best practice for securing workloads and hosts in your cloud-native strategy. - -## Value - -Zero Trust Networks are resilient even when attackers manage to breach applications or infrastructure. They make it hard for attackers to move laterally, and reconnaissance activities easier to spot. 
- -Organizations that embrace the change control model in this How-To will be able to tightly secure their network without imposing a drag on innovation in their applications. Security teams can be enablers of business value, not roadblocks. - -## Concepts - -### The network is always hostile - -**Zero Trust Networking** is an approach to network security that is unified by the principle that the network is always assumed to be hostile. This is in direct contrast to perimeter and “segmentation” approaches that focus on separating the world into trusted and untrusted network segments. - -Why assume the network is hostile? In many attack scenarios, it is. - -- Attackers may compromise “trusted” parts of your network infrastructure: routers, switches, links, etc. -- Deliberate or accidental misconfiguration can route sensitive traffic over untrusted networks, like the public Internet. -- Other endpoints on a “trusted” network may be compromised: your application may share a network with thousands of other servers, tens of thousands of other containers, thousands of personal laptops, phones, etc. - -Major breaches typically start as a minor compromise of as little as a single component, but attackers then use the network to move laterally toward high value targets: your company’s or customers’ data. In a zone or perimeter model, attackers can move freely inside the perimeter or zone after they have compromised a single endpoint. A Zero Trust Network is resilient to this threat because it enforces strong, cryptographic authentication and access control on each and every network connection. - -### Requirements of a Zero Trust Network - -Zero Trust Networks rely on network access controls with specific requirements: - -**Requirement 1:** All network connections are subject to enforcement (not just those that cross zone boundaries). - -**Requirement 2**: Establishing the identity of a remote endpoint is always based on multiple criteria including strong cryptographic proofs of identity. In particular, network-level identifiers like IP address and port are not sufficient on their own as they can be spoofed by a hostile network. - -**Requirement 3**: All expected and allowed network flows are explicitly allowed. Any connection not explicitly allowed is denied. - -**Requirement 4**: Compromised workloads must not be able to circumvent policy enforcement. - -**Requirement 5**: Many Zero Trust Networks also rely on encryption of network traffic to prevent disclosure of sensitive data to hostile entities snooping network traffic. This is not an absolute requirement if private data are not exchanged over the network, but to fit the criteria of a Zero Trust Network, encryption must be used on every network connection if it is required at all. A Zero Trust Network does not distinguish between trusted and untrusted network links or paths. Also note that even when not using encryption for data privacy, cryptographic proofs of authenticity are still used to establish identity. - -### How {{prodname}} and Istio implement Zero Trust Network requirements - -{{prodname}} works in concert with the Istio service mesh to implement all you need to build a Zero Trust Network in your Kubernetes cluster. - -#### Multiple enforcement points - -When operating with Istio, incoming requests to your workloads traverse two distinct enforcement points: - -1. The host Linux kernel. {{prodname}} policy is enforced in the Linux kernel using iptables at L3-L4. -1. The Envoy proxy. 
{{prodname}} policy is enforced in the Envoy proxy at L3-L7, with requests being cryptographically authenticated. A lightweight policy decision sidecar called Dikastes assists Envoy in this enforcement.
-
-These multiple enforcement points establish the identity of the remote endpoint based on multiple criteria (Requirement 2). The host Linux kernel enforcement protects your workloads even if the workload pod is compromised and the Envoy proxy bypassed (Requirement 4).
-
-#### {{prodname}} policy store
-
-The policies in the {{prodname}} data store encode the allow-list of allowed flows (Requirement 3).
-
-{{prodname}} network policy is designed to be flexible to fit many different security paradigms, so it can express, for example, both Zero Trust Network-style allow-lists as well as legacy paradigms like zones. You can even layer both of these approaches on top of one another without creating a maintenance mess by composing multiple policy documents.
-
-The How To section of this document explains how to write policy specifically in the style of Zero Trust Networks. Conceptually, you will begin by denying all network flows by default, then add rules that allow the specific expected flows that make up your application. When you finish, only legitimate application flows are allowed and all others are denied.
-
-#### {{prodname}} control plane
-
-The {{prodname}} control plane handles distributing all the policy information from the {{prodname}} data store to each enforcement point, ensuring that all network connections are subject to enforcement (Requirement 1). It translates the high-level declarative policy into the detailed enforcement attributes that change as applications scale up and down to meet demand, and evolve as developers modify them.
-
-#### Istio Citadel Identity System
-
-In {{prodname}} and Istio, workload identities are based on Kubernetes Service Accounts. An Istio component called Citadel handles minting cryptographic keys for each Service Account to prove its identity on the network (Requirement 2) and encrypt traffic (Requirement 5). This allows the Zero Trust Network to be resilient even if attackers compromise network infrastructure like routers or links.
-
-## How to
-
-This section explains how to establish a Zero Trust Network using {{prodname}} and Istio. It is written from the perspective of platform and security engineers, but should also be useful for individual developers looking to understand the process.
-
-Building and maintaining a Zero Trust Network is the job of an entire application delivery organization, that is, everyone involved in delivering a networked application to its end users. This includes:
-
-- Developers, DevOps, and Operators
-- Platform Engineers
-- Network Engineers
-- Security Engineers and Security Operatives
-
-In particular, the view that developers build applications and then hand them off to others to figure out how to secure is incompatible with a Zero Trust Network strategy. To function correctly, a Zero Trust Network needs to be configured with detailed information about expected flows---information that developers are in a unique position to know.
-
-At a high level, you will undertake the following steps to establish a Zero Trust Network:
-
-1. Install {{prodname}}.
-1. Install Istio and enable {{prodname}} integration.
-1. Establish workload identity by using Service Accounts (see the sketch below).
-1. Write initial allow-list policies for each service.
-
-After your Zero Trust Network is established, you will need to maintain it.
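-
-As a small, hedged sketch of step 3, you might create one Service Account per service and point the corresponding Deployment at it. The names `api` and `microblog` below are illustrative only, matching the example application used later in this document:
-
-```bash
-# Create a dedicated identity for the api service in the application namespace.
-kubectl create serviceaccount api -n microblog
-
-# Point the Deployment's pod template at that Service Account,
-# so the api pods assert this identity on the network.
-kubectl patch deployment api -n microblog \
-  --patch '{"spec":{"template":{"spec":{"serviceAccountName":"api"}}}}'
-```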
- -### Install {{prodname}} - -Follow the [install instructions](../getting-started/kubernetes/index.mdx) to get {{prodname}} software running in your cluster. - -### Install Istio and enable {{prodname}} integration - -Follow the instructions to [Enable application layer policy](istio/app-layer-policy.mdx). - -The instructions include a “demo” install of Istio for quickly testing out functionality. For a production installation to support a Zero Trust Network, you should instead follow the official Istio install instructions. Be sure to enable mutually authenticated TLS (mTLS) in your install options by setting **global.mtls.enabled** to **true**. - -### Establish workload identity by using Service Accounts - -Our eventual goal is to write access control policy that authorizes individual expected network flows. We want these flows to be scoped as tightly as practical. In a {{prodname}} Zero Trust Network, the cryptographic identities are Kubernetes Service Accounts. Istio handles crypto-key management for you so that each workload can assert its Service Account identity in a secure manner. - -You have some flexibility in how you assign identities for the purpose of your Zero Trust Network policy. The right balance for most people is to use a unique identity for each Kubernetes Service in your application (or Deployment if you have workloads that don’t accept any incoming connections). Assigning identity to entire applications or namespaces is probably too coarse, since applications usually consist of multiple services (or dozens of microservices) with different actual access needs. - -You should assign unique identities to microservices even if you happen to know that they access the same things. Your policy will be more readable if the identities correspond to logical components of the application. You can grant them the same permissions easily, and if in the future they need different permissions it will be easier to handle. - -After you decide on the set of identities you require, create the Kubernetes Service Accounts, then modify your application configuration so that each Deployment, ReplicaSet, StatefulSet, etc. uses the correct Service Account. - -### Write initial allow-list policies for each service - -The final step to establishing your Zero Trust Network is to write the policies for each service in your network. The [Application Layer Policy Tutorial](istio/enforce-policy-istio.mdx) gives an overview of setting up policies that allow traffic based on Service Account identity. - -For each service you will: - -1. Determine the full set of other identities that should access it. -1. Add rules to allow each of those flows. - -After a pod is selected by at least one policy, any traffic not explicitly allowed is denied. This implements the Zero Trust Network paradigm of an explicit allow-list of expected flows. - -### Determine the full set of identities that should access each service - -There are several approaches to determining the set of identities that should access a service. Work with the developers of the application to generate this list and ensure it is correct. One approach is to create a flow diagram of your entire application. A flow diagram is a kind of graph where each identity is a node, and each expected flow is an edge. - -Let’s look at an example application. - -![zero-trust-app](/img/calico/zero-trust-app.png) - -In this example, requests from end-users all flow through a service called api, where they can trigger calls to other services in the backend. 
These in turn can call other services. Each arrow in this diagram represents an expected flow, and if two services do not have a connecting arrow, they are not expected to have any network communication. For example, the only services that call the post service are api and search.
-
-For simple applications, especially if they are maintained by a single team, the developers will probably be able to just write down this flow graph from memory or with a quick look at the application code.
-
-If this is difficult to do from memory, you have several options.
-
-1. Run the application in a test environment with policy enabled.
-   a. Look at service logs to see what connectivity has broken.
-   b. Add rules that allow those flows and iterate until the application functions normally.
-   c. Move on to the next service and repeat.
-1. Collect flow logs from a running instance of your application. Calico Enterprise can be used for this purpose, or the Kiali dashboard that comes with Istio.
-   a. Process the flow logs to determine the set of flows.
-   b. Review the logged flows and add rules for each expected flow.
-1. Use Calico Enterprise for policy, and put it into logging-only mode.
-   a. In this mode “denied” connections are logged instead of dropped.
-   b. Review the “denied” logs and add rules for each expected flow.
-
-When determining flows from a running application instance, be sure to review each rule you add with application developers to determine if it is legitimate and expected. The last thing you want is for a breach-in-progress to be enshrined as expected flows in policy!
-
-### Write policies with allow rules for each flow
-
-After you have the set of expected flows for each service, you are ready to write {{prodname}} network policy to allow-list those flows and deny all others.
-
-Returning to the example flow graph in the previous section, let’s write the policy for the post service. For the purpose of this example, assume all the services in the application run in a Kubernetes Namespace called microblog. We see from the flow graph that the post service is accessed by the api and search services.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: post-allow-list
-  namespace: microblog
-spec:
-  selector: svc == 'post'
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      source:
-        serviceAccounts:
-          names: ['api', 'search']
-        namespaceSelector: app == 'microblog'
-      protocol: TCP
-      destination:
-        ports:
-          - 8080
-```
-
-Things to notice in this example:
-
-- **Namespace**
-
-  Create a {{prodname}} NetworkPolicy in the same **namespace** as the service for the allow-list (microblog).
-
-  ```yaml
-  metadata:
-    name: post-allow-list
-    namespace: microblog
-  ```
-
-- **Selectors**
-
-  The selector controls the pods the policy applies to. It should be the same selector used to define the Kubernetes Service.
-
-  ```yaml
-  spec:
-    selector: svc == 'post'
-  ```
-
-- **Service account by name**
-
-  In the **source:** selector, allow **api** and **search** by name. An alternative to selecting service accounts by name is selecting them by namespaceSelector (next example).
-
-  ```yaml
-  source:
-    serviceAccounts:
-      names: ['api', 'search']
-  ```
-
-- **Service account by namespaceSelector**
-
-  Service Accounts are uniquely identified by name and namespace. Use a **namespaceSelector** to fully-qualify the Service Accounts you are allowing, so if names are repeated in other namespaces they will not be granted access to the service.
- - ```yaml - source: - serviceAccounts: - names: ['api', 'search'] - namespaceSelector: app == 'microblog' - ``` - -- **Rules** - - Scope your rules as tightly as possible. In this case we are allowing connection only on TCP port 8080. - - ```yaml - destination: - ports: - - 8080 - ``` - -The above example lists the identities that need access to the post service by name. This style of allow-list works best when the developers responsible for a service have explicit knowledge of who needs access to their service. - -However, some development teams don’t explicitly know who needs access to their service, and don’t need to know. The service might be very generic and used by lots of different applications across the organization---for example: a logging service. Instead of listing the Service Accounts that get access to the service explicitly one-by-one, you can use a label selector that selects on Service Accounts. - -In the following example, we have changed the **serviceAccount** clause. Instead of a name, we use a label selector. The **selector: svc-post == access** label grants access to the post service. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: post-allow-list - namespace: microblog -spec: - selector: svc == 'post' - types: - - Ingress - ingress: - - action: Allow - source: - serviceAccounts: - selector: svc-post == 'access' - namespaceSelector: app == 'microblog' - protocol: TCP - destination: - ports: - - 8080 -``` - -Define labels that indicate permission to access services in the cluster. Then, modify the ServiceAccounts for each identity that needs access. In this example, we would add the label **svc-post == access** to the **api** and **search** Service Accounts. - -Whether you choose to explicitly name the Service Accounts or use a label selector is up to you, and you can make a different choice for different services. Using explicit names works best for services that have a small number of clients, or when you want the service owner to be involved in the decision to allow something new to access the service. If some other team wants to get access to the service, they call up the owner of the service and ask them to grant access. In contrast, using labels is good when you want more decentralized control. The service owner defines the labels that grant access to the service and trusts the other development teams to label their Service Accounts when they need access. - -### Maintain your zero trust network - -The allow-list policies are tightly scoped to the exact expected flows in the applications running in the Zero Trust Network. If these applications are under active development the expected flows will change, and policy, therefore, also needs to change. Maintaining a Zero Trust Network means instituting a change control policy that ensures: - -- Policies are up to date with application changes -- Policies are tightly scoped to expected flows -- Changes keep up with the pace of application development - -It is difficult to overstate how important the last point is. If your change control process cannot handle the volume of changes, or introduces too much latency in deploying new features, your transition to a Zero Trust Network is very likely to fail. Either your senior leadership will choose business expediency and overrule your security concerns, or competitors that can roll out new versions faster will stifle your market share. 
On the other hand, if your change control process does keep pace with application development, it will bring security value without sacrificing the pace of innovation. - -The size of the security team is often relatively small compared with application development and operations teams in most organizations. Fortunately, most application changes will not require changes in security policy, but even a small proportion of changes can lead to a large absolute number when dealing with large application teams. For this reason, it is often not feasible for a member of the security team to make every policy change. A classic complaint in large enterprises is that it takes weeks to change a firewall rule---this is often not because the actual workflow is time consuming but because the security team is swamped with a large backlog. - -Therefore, we recommend that the authors of the policy changes be developers/devops (i.e. authorship should “shift left”). This allows your change control process to scale naturally as your applications do. When application authors make changes that require policy changes (say, adding a new microservice), they also make the required policy changes to authorize the network activity associated with it. - -Here is a simplified application delivery pipeline flow. - -![zero-trust-app](/img/calico/zero-trust-deploy.png) - -Developers, DevOps, and/or Operators make changes to applications primarily by making changes to the artifacts at the top of the diagram: the source code and associated deployment configuration. These artifacts are put in source control (e.g. git) and control over changes to the running applications are managed as commits to this source repository. In a Kubernetes environment, the deployment configuration is typically the objects that appear on the Kubernetes API, such as Services and Deployment manifests. - -What you should do is include the NetworkPolicy as part of those deployment config artifacts. In some organizations, these artifacts are in the same repo as the source code, and in others they reside in a separate repo, but the principle is the same: you manage policy change control as commits to the deployment configuration. This config then works its way through the delivery pipeline and is finally applied to the running Kubernetes cluster. - -Your developers will likely require training and support from the security team to get policy correct at first. Many trained developers are not used to thinking about network security. The logical controls expressed in network policy are simple compared with the flexibility they have in source code, so the primary support they will need from you is around the proper security mindset and principles of Zero Trust Networks. You can apply a default deny policy in your cluster to ensure that developers can’t simply forget to apply their own allow-listed policy. - -You may wish to review every security policy change request (aka pull request in git workflows) at first. If you do, then be sure you have time allotted, and consider rolling out Zero Trust Network policies incrementally, one application or service at a time. As development teams gain confidence you can pull back and have them do their own reviews. Security professionals can do spot checks on change requests or entire policies to ensure quality remains high in the long term. 
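-
-As a backstop for the default-deny approach mentioned above, one hedged sketch of a cluster-wide default-deny policy in {{prodname}} follows. The excluded namespaces are illustrative assumptions; adjust them to whatever system namespaces your cluster actually runs, so that you do not cut off cluster infrastructure:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: default-deny
-spec:
-  # Avoid selecting system workloads, which would break the cluster itself.
-  selector: projectcalico.org/namespace not in {'kube-system', 'calico-system'}
-  types:
-    - Ingress
-    - Egress
-```
-
-Because this policy selects workloads but contains no allow rules, any traffic not explicitly allowed by a more specific policy is denied.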
-
-## Additional resources
-
-- [Protect hosts](hosts/protect-hosts.mdx)
-- [Global network policy](../reference/resources/globalnetworkpolicy.mdx)
-- [Network policy](../reference/resources/networkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx
deleted file mode 100644
index 480ed2fabf..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/crypto-auth.mdx
+++ /dev/null
@@ -1,144 +0,0 @@
----
-description: Enable TLS authentication and encryption for various Calico components.
----
-
-# Configure encryption and authentication to secure Calico components
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Connections from {{prodname}} components to etcd
-
-Operator based installations do not require communication with etcd, so this section does not apply to them.
-
-If you are using the etcd datastore, we recommend enabling mutual TLS authentication on
-its connections as follows.
-
-- [Configure etcd](https://coreos.com/etcd/docs/latest/op-guide/security.html) to encrypt its
-  communications with TLS and require clients to present certificates signed by the etcd certificate
-  authority.
-
-- Configure each {{prodname}} component to verify the etcd server's identity and to present
-  a certificate to the etcd server that is signed by the etcd certificate authority.
-  - [{{nodecontainer}}](../../reference/configure-calico-node.mdx)
-  - [`calicoctl`](../../operations/calicoctl/configure/etcd.mdx)
-  - [CNI plugin](../../reference/configure-cni-plugins.mdx#etcd-location) (Kubernetes and OpenShift only)
-  - [Kubernetes controllers](../../reference/kube-controllers/configuration.mdx#configuring-datastore-access) (Kubernetes and OpenShift only)
-  - [Felix](../../reference/felix/configuration.mdx#etcd-datastore-configuration)
-  - [Typha](../../reference/typha/configuration.mdx#etcd-datastore-configuration) (often deployed in
-    larger Kubernetes deployments)
-  - [Neutron plugin or ML2 driver](../../networking/openstack/configuration.mdx#neutron-server-etcneutronneutronconf) (OpenStack only)
-  - [DHCP agent](../../networking/openstack/configuration.mdx#neutron-server-etcneutronneutronconf) (OpenStack only)
-
-### Connections from {{prodname}} components to kube-apiserver (Kubernetes and OpenShift)
-
-We recommend enabling TLS on kube-apiserver, as well as the client certificate and JSON web token (JWT)
-authentication modules. This ensures that all of its communications with {{prodname}} components occur
-over TLS. The {{prodname}} components present either an X.509 certificate or a JWT to kube-apiserver
-so that kube-apiserver can verify their identities.
-
-### Connections from Felix to Typha (Kubernetes)
-
-Operator based installations automatically configure mutual TLS authentication on connections from
-Felix to Typha.
-
-For manifest-based installations, we recommend enabling mutual TLS authentication on connections from Felix to Typha.
-To do so, you must provision Typha with a server certificate and Felix with a client
-certificate. Each service will need the private key associated with its certificate.
-In addition, you must configure one of the following.
-
-- **SPIFFE identifiers** (recommended): Generate a [SPIFFE](https://github.com/spiffe/spiffe) identifier for Felix,
-  set `ClientURISAN` on Typha to Felix's SPIFFE ID, and include Felix's SPIFFE ID in the `URI SAN` field
-  of its certificate.
Similarly, generate a [SPIFFE](https://github.com/spiffe/spiffe) identifier for Typha, - set `TyphaURISAN` on Felix to Typha's SPIFFE ID, and include Typha's SPIFFE ID in the `URI SAN` field - of its certificate. - -- **Common Name identifiers**: Configure `ClientCN` on Typha to the value in the `Common Name` of Felix's - certificate. Configure `ClientCN` on Felix to the value in the `Common Name` of Typha's - certificate. - -:::tip - -If you are migrating from Common Name to SPIFFE identifiers, you can set both values. -If either matches, the communication succeeds. - -::: - -Here is an example of how you can secure the Felix-Typha communications in your -cluster: - -1. Choose a certificate authority, or set up your own. - -1. Obtain or generate the following leaf certificates, signed by that - authority, and corresponding keys: - - - A certificate for each Felix with Common Name `typha-client` and - extended key usage `ClientAuth`. - - - A certificate for each Typha with Common Name `typha-server` and - extended key usage `ServerAuth`. - -1. Configure each Typha with: - - - `CAFile` pointing to the certificate authority certificate - - - `ServerCertFile` pointing to that Typha's certificate - - - `ServerKeyFile` pointing to that Typha's key - - - `ClientCN` set to `typha-client` - - - `ClientURISAN` unset. - -1. Configure each Felix with: - - - `TyphaCAFile` pointing to the Certificate Authority certificate - - - `TyphaCertFile` pointing to that Felix's certificate - - - `TyphaKeyFile` pointing to that Felix's key - - - `TyphaCN` set to `typha-server` - - - `TyphaURISAN` unset. - -For a [SPIFFE](https://github.com/spiffe/spiffe)-compliant deployment you can -follow the same procedure as above, except: - -1. Choose [SPIFFE Identities](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#2-spiffe-identity) - - to represent Felix and Typha. - -1. When generating leaf certificates for Felix and Typha, put the relevant - SPIFFE Identity in the certificate as a URI SAN. - -1. Leave `ClientCN` and `TyphaCN` unset. - -1. Set Typha's `ClientURISAN` parameter to the SPIFFE Identity for Felix that - you use in each Felix certificate. - -1. Set Felix's `TyphaURISAN` parameter to the SPIFFE Identity for Typha. - -For detailed reference information on these parameters, refer to: - -- **Typha**: [Felix-Typha TLS configuration](../../reference/typha/configuration.mdx#felix-typha-tls-configuration) - -- **Felix**: [Felix-Typha TLS configuration](../../reference/felix/configuration.mdx#felix-typha-tls-configuration) - - - diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/index.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/index.mdx deleted file mode 100644 index 24e13343f5..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/comms/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Secure communications for Calico components. -hide_table_of_contents: true ---- - -# Secure Calico component communications - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx deleted file mode 100644 index 9028ca8d27..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/comms/reduce-nodes.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: Configure the Calico Typha TCP port. 
----
-
-# Schedule Typha for scaling to well-known nodes
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Schedule Typha to well-known nodes.
-
-## Value
-
-By scheduling Typha to well-known nodes, you can reduce the number of nodes that expose
-Typha's listen port.
-
-## Concepts
-
-### Typha
-
-Typha is a {{prodname}} component that improves scalability and reduces the impact that
-large clusters may have on the Kubernetes API. Typha agents must accept connections from other agents on a fixed port.
-
-Because Typha is part of the {{prodname}} bootstrap infrastructure, it must be available before
-pod networking begins, so it uses host networking and opens a port on the node it is
-scheduled on. By default, it can get scheduled to any node and opens TCP port 5473.
-
-## How to
-
-### Tell if you have installed Typha
-
-Operator based installations always include Typha.
-
-For manifest-based installations, check if the `calico-typha` deployment exists in the `kube-system` namespace.
-
-```bash
-kubectl get deployment -n kube-system calico-typha
-```
-
-### Schedule Typha to well-known nodes
-
-You can use the Installation API to configure a node affinity for Typha pods. The operator supports both
-`preferredDuringSchedulingIgnoredDuringExecution` and `requiredDuringSchedulingIgnoredDuringExecution` options.
-
-For example, to require the scheduler to place Typha on nodes with the label "typha=allowed":
-
-```yaml
-kind: Installation
-apiVersion: operator.tigera.io/v1
-metadata:
-  name: default
-spec:
-  typhaAffinity:
-    nodeAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        nodeSelectorTerms:
-          - matchExpressions:
-              - key: typha
                operator: In
-                values:
-                  - allowed
-```
-
-For manifest-based installations, see [scheduling Typha to well-known nodes](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx
deleted file mode 100644
index 368b00fdf4..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/secure-bgp.mdx
+++ /dev/null
@@ -1,185 +0,0 @@
----
-description: Configure BGP passwords to prevent attackers from injecting false routing information.
----
-
-# Secure BGP sessions
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Use BGP passwords to prevent attackers from injecting false routing information.
-
-## Value
-
-Setting a password on a BGP peering between BGP speakers means that a peering will only
-work when both ends of the peering have the same password. This provides a layer of defense
-against an attacker impersonating an external BGP peer or a workload in the cluster, for
-example to inject malicious routing information into the cluster.
-
-## Concepts
-
-### Password protection on BGP sessions
-
-Password protection is a [standardized](https://tools.ietf.org/html/rfc5925) optional
-feature of BGP sessions. The effect is that the two peers at either end of a BGP session
-can only communicate, and exchange routing information, if they are both configured with
-the same password.
-
-Please note that password use does not cause the data exchange to be _encrypted_. It
-remains relatively easy to _eavesdrop_ on the data exchange, but not to _inject_ false
-information.
-
-### Using Kubernetes secrets to store passwords
-
-In Kubernetes, the Secret resource is designed for holding sensitive information,
-including passwords.
Therefore, for this {{prodname}} feature, we use Secrets to
-store BGP passwords.
-
-## How to
-
-To use a password on a BGP peering:
-
-1. Create (or update) a Kubernetes secret in the namespace where {{noderunning}} is
-   running, so that it has a key whose value is the desired password. Note the secret
-   name and the key name.
-
-   :::note
-
-   BGP passwords must be 80 characters or fewer. If a
-   password longer than that is configured, the BGP sessions with
-   that password will fail to be established.
-
-   :::
-
-1. Ensure that {{noderunning}} has RBAC permissions to access that secret.
-
-1. Specify the secret and key name on the relevant BGPPeer resource.
-
-### Create or update Kubernetes secret
-
-For example:
-
-```bash
-kubectl create -f - <<EOF
-apiVersion: v1
-kind: Secret
-metadata:
-  name: bgp-secrets
-  namespace: kube-system
-type: Opaque
-stringData:
-  rr-password: very-secret # example only; replace with your own password
-EOF
-```
-
-When [configuring a BGP peer](../../networking/configuring/bgp.mdx),
-include the secret and key name in the specification of the BGPPeer resource, like this:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: bgppeer-global-3040
-spec:
-  peerIP: 192.20.30.40
-  asNumber: 64567
-  password:
-    secretKeyRef:
-      name: bgp-secrets
-      key: rr-password
-```
-
-Alternatively, to set a password for the node-to-node mesh, include the secret in the default [BGP configuration](../../reference/resources/bgpconfig.mdx)
-similar to the following:
-
-```yaml
-kind: BGPConfiguration
-apiVersion: projectcalico.org/v3
-metadata:
-  name: default
-spec:
-  logSeverityScreen: Info
-  nodeToNodeMeshEnabled: true
-  nodeMeshPassword:
-    secretKeyRef:
-      name: bgp-secrets
-      key: rr-password
-```
-
-:::note
-
-Node-to-node mesh must be enabled to set the node-to-node mesh
-BGP password.
-
-:::
-
-## Additional resources
-
-For more detail about the BGPPeer resource, see
-[BGPPeer](../../reference/resources/bgppeer.mdx).
-
-For more on configuring BGP peers, see [configuring BGP peers](../../networking/configuring/bgp.mdx).
diff --git a/calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx b/calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx
deleted file mode 100644
index 931a3a0a2f..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/comms/secure-metrics.mdx
+++ /dev/null
@@ -1,512 +0,0 @@
----
-description: Limit access to Calico metric endpoints using network policy.
----
-
-# Secure Calico Prometheus endpoints
-
-## About securing access to {{prodname}}'s metrics endpoints
-
-When using {{prodname}} with Prometheus metrics enabled, we recommend using network policy
-to limit access to {{prodname}}'s metrics endpoints.
-
-## Prerequisites
-
-- {{prodname}} is installed with Prometheus metrics reporting enabled.
-- `calicoctl` is [installed in your PATH and configured to access the data store](../../operations/calicoctl/install.mdx).
-
-## Choosing an approach
-
-This guide provides two example workflows for creating network policies to limit access
-to {{prodname}}'s Prometheus metrics. Choosing an approach depends on your requirements.
-
-- [Using a deny-list approach](#using-a-deny-list-approach)
-
-  This approach allows all traffic to your hosts by default, but lets you limit access to specific ports using
-  {{prodname}} policy. This approach allows you to restrict access to specific ports, while leaving other
-  host traffic unaffected.
-
-- [Using an allow-list approach](#using-an-allow-list-approach)
-
-  This approach denies traffic to and from your hosts by default, and requires that all
-  desired communication be explicitly allowed by a network policy.
This approach is more secure because - only explicitly-allowed traffic will get through, but it requires you to know all the ports that should be open on the host. - -## Using a deny-list approach - -### Overview - -The basic process is as follows: - -1. Create a default network policy that allows traffic to and from your hosts. -1. Create host endpoints for each node that you'd like to secure. -1. Create a network policy that denies unwanted traffic to the {{prodname}} metrics endpoints. -1. Apply labels to allow access to the Prometheus metrics. - -### Example for {{nodecontainer}} - -This example shows how to limit access to the {{nodecontainer}} Prometheus metrics endpoints. - -1. Create a default network policy to allow host traffic - - First, create a default-allow policy. Do this first to avoid a drop in connectivity when adding the host endpoints - later, since host endpoints with no policy default to deny. - - To do this, create a file named `default-host-policy.yaml` with the following contents. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: default-host - spec: - # Select all {{prodname}} nodes. - selector: running-calico == "true" - order: 5000 - ingress: - - action: Allow - egress: - - action: Allow - ``` - - Then, use `calicoctl` to apply this policy. - - ```bash - calicoctl apply -f default-host-policy.yaml - ``` - -1. List the nodes on which {{prodname}} is running with the following command. - - ```bash - calicoctl get nodes - ``` - - In this case, we have two nodes in the cluster. - - ``` - NAME - kubeadm-master - kubeadm-node-0 - ``` - -1. Create host endpoints for each {{prodname}} node. - - Create a file named `host-endpoints.yaml` containing a host endpoint for each node listed - above. In this example, the contents would look like this. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: kubeadm-master.eth0 - labels: - running-calico: 'true' - spec: - node: kubeadm-master - interfaceName: eth0 - expectedIPs: - - 10.100.0.15 - --- - apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: kubeadm-node-0.eth0 - labels: - running-calico: 'true' - spec: - node: kubeadm-node-0 - interfaceName: eth0 - expectedIPs: - - 10.100.0.16 - ``` - - In this file, replace `eth0` with the desired interface name on each node, and populate the - `expectedIPs` section with the IP addresses on that interface. - - Note the use of a label to indicate that this host endpoint is running {{prodname}}. The - label matches the selector of the network policy created in step 1. - - Then, use `calicoctl` to apply the host endpoints with the following command. - - ```bash - calicoctl apply -f host-endpoints.yaml - ``` - -1. Create a network policy that restricts access to the {{nodecontainer}} Prometheus metrics port. - - Now let's create a network policy that limits access to the Prometheus metrics port such that - only endpoints with the label `calico-prometheus-access: true` can access the metrics. - - To do this, create a file named `calico-prometheus-policy.yaml` with the following contents. - - ```yaml - # Allow traffic to Prometheus only from sources that are - # labeled as such, but don't impact any other traffic. - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: restrict-calico-node-prometheus - spec: - # Select all {{prodname}} nodes. 
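-      # (i.e., the host endpoints created in the previous step, which carry the label running-calico: "true")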
-     selector: running-calico == "true"
-     order: 500
-     types:
-       - Ingress
-     ingress:
-       # Deny anything that tries to access the Prometheus port
-       # but that doesn't match the necessary selector.
-       - action: Deny
-         protocol: TCP
-         source:
-           notSelector: calico-prometheus-access == "true"
-         destination:
-           ports:
-             - 9091
-   ```
-
-   This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress deny rule.
-   The ingress rule denies traffic to port 9091 unless the source of traffic has the label `calico-prometheus-access: true`. This means traffic is denied from all {{prodname}} workload endpoints, host endpoints, and global network sets that do not have the label, as well as from any other network endpoints unknown to {{prodname}}.
-
-   Then, use `calicoctl` to apply this policy.
-
-   ```bash
-   calicoctl apply -f calico-prometheus-policy.yaml
-   ```
-
-1. Apply labels to any endpoints that should have access to the metrics.
-
-   At this point, only endpoints that have the label `calico-prometheus-access: true` can reach
-   {{prodname}}'s Prometheus metrics endpoints on each node. To grant access, simply add this label to the
-   desired endpoints.
-
-   For example, to allow access to a Kubernetes pod you can run the following command.
-
-   ```bash
-   kubectl label pod my-prometheus-pod calico-prometheus-access=true
-   ```
-
-   If you would like to grant access to a specific IP network, you
-   can create a [global network set](../../reference/resources/globalnetworkset.mdx) using `calicoctl`.
-
-   For example, you might want to grant access to your management subnets.
-
-   ```yaml
-   apiVersion: projectcalico.org/v3
-   kind: GlobalNetworkSet
-   metadata:
-     name: calico-prometheus-set
-     labels:
-       calico-prometheus-access: 'true'
-   spec:
-     nets:
-       - 172.15.0.0/24
-       - 172.101.0.0/24
-   ```
-
-### Additional steps for Typha deployments
-
-If your {{prodname}} installation uses the Kubernetes API datastore and has greater than 50 nodes, it is likely
-that you have installed Typha. This section shows how to use an additional network policy to secure the Typha
-Prometheus endpoints.
-
-After following the steps above, create a file named `typha-prometheus-policy.yaml` with the following contents.
-Note that this policy needs its own name, `restrict-typha-prometheus`, so that it does not overwrite the
-`restrict-calico-node-prometheus` policy created earlier.
-
-```yaml
-# Allow traffic to Prometheus only from sources that are
-# labeled as such, but don't impact any other traffic.
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: restrict-typha-prometheus
-spec:
-  # Select all {{prodname}} nodes.
-  selector: running-calico == "true"
-  order: 500
-  types:
-    - Ingress
-  ingress:
-    # Deny anything that tries to access the Prometheus port
-    # but that doesn't match the necessary selector.
-    - action: Deny
-      protocol: TCP
-      source:
-        notSelector: calico-prometheus-access == "true"
-      destination:
-        ports:
-          - 9093
-```
-
-This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress deny rule.
-The ingress rule denies traffic to port 9093 unless the source of traffic has the label `calico-prometheus-access: true`. This means traffic is denied from all {{prodname}} workload endpoints, host endpoints, and global network sets that do not have the label, as well as from any other network endpoints unknown to {{prodname}}.
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f typha-prometheus-policy.yaml
-```
-
-### Example for kube-controllers
-
-If your {{prodname}} installation exposes metrics from kube-controllers, you can limit access to those metrics
-with the following network policy.
-
-Create a file named `kube-controllers-prometheus-policy.yaml` with the following contents.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: restrict-kube-controllers-prometheus
-  namespace: calico-system
-spec:
-  # Select kube-controllers.
-  selector: k8s-app == "calico-kube-controllers"
-  order: 500
-  types:
-    - Ingress
-  ingress:
-    # Deny anything that tries to access the Prometheus port
-    # but that doesn't match the necessary selector.
-    - action: Deny
-      protocol: TCP
-      source:
-        notSelector: calico-prometheus-access == "true"
-      destination:
-        ports:
-          - 9094
-```
-
-:::note
-
-The above policy is installed in the calico-system namespace. If your cluster has {{prodname}} installed
-in the kube-system namespace, you will need to create the policy in that namespace instead.
-
-:::
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f kube-controllers-prometheus-policy.yaml
-```
-
-## Using an allow-list approach
-
-### Overview
-
-The basic process is as follows:
-
-1. Create host endpoints for each node that you'd like to secure.
-1. Create a network policy that allows desired traffic to the {{prodname}} metrics endpoints.
-1. Apply labels to allow access to the Prometheus metrics.
-
-### Example for {{nodecontainer}}
-
-1. List the nodes on which {{prodname}} is running with the following command.
-
-   ```bash
-   calicoctl get nodes
-   ```
-
-   In this case, we have two nodes in the cluster.
-
-   ```
-   NAME
-   kubeadm-master
-   kubeadm-node-0
-   ```
-
-1. Create host endpoints for each {{prodname}} node.
-
-   Create a file named `host-endpoints.yaml` containing a host endpoint for each node listed
-   above. In this example, the contents would look like this.
-
-   ```yaml
-   apiVersion: projectcalico.org/v3
-   kind: HostEndpoint
-   metadata:
-     name: kubeadm-master.eth0
-     labels:
-       running-calico: 'true'
-   spec:
-     node: kubeadm-master
-     interfaceName: eth0
-     expectedIPs:
-       - 10.100.0.15
-   ---
-   apiVersion: projectcalico.org/v3
-   kind: HostEndpoint
-   metadata:
-     name: kubeadm-node-0.eth0
-     labels:
-       running-calico: 'true'
-   spec:
-     node: kubeadm-node-0
-     interfaceName: eth0
-     expectedIPs:
-       - 10.100.0.16
-   ```
-
-   In this file, replace `eth0` with the desired interface name on each node, and populate the
-   `expectedIPs` section with the IP addresses on that interface.
-
-   Note the use of a label to indicate that this host endpoint is running {{prodname}}. The
-   label matches the selector of the network policy created in the next step.
-
-   Then, use `calicoctl` to apply the host endpoints with the following command. This will prevent all
-   traffic to and from the host endpoints.
-
-   ```bash
-   calicoctl apply -f host-endpoints.yaml
-   ```
-
-   :::note
-
-   {{prodname}} allows some traffic as a failsafe even after applying this policy. This can
-   be adjusted using the `failsafeInboundHostPorts` and `failsafeOutboundHostPorts` options
-   on the [FelixConfiguration resource](../../reference/resources/felixconfig.mdx).
-
-   :::
-
-1. Create a network policy that allows access to the {{nodecontainer}} Prometheus metrics port.
-
-   Now let's create a network policy that allows access to the Prometheus metrics port such that
-   only endpoints with the label `calico-prometheus-access: true` can access the metrics.
-
-   To do this, create a file named `calico-prometheus-policy.yaml` with the following contents.
- - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkPolicy - metadata: - name: restrict-calico-node-prometheus - spec: - # Select all {{prodname}} nodes. - selector: running-calico == "true" - order: 500 - types: - - Ingress - ingress: - # Allow traffic from selected sources to the Prometheus port. - - action: Allow - protocol: TCP - source: - selector: calico-prometheus-access == "true" - destination: - ports: - - 9091 - ``` - - This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress allow rule. - The ingress rule allows traffic to port 9091 from any source with the label `calico-prometheus-access: true`, meaning - all {{prodname}} workload endpoints, host endpoints, and global network sets that have the label will be allowed access. - - Then, use `calicoctl` to apply this policy. - - ```bash - calicoctl apply -f calico-prometheus-policy.yaml - ``` - -1. Apply labels to any endpoints that should have access to the metrics. - - At this point, only endpoints that have the label `calico-prometheus-access: true` can reach - {{prodname}}'s Prometheus metrics endpoints on each node. To grant access, simply add this label to the - desired endpoints. - - For example, to allow access to a Kubernetes pod you can run the following command. - - ```bash - kubectl label pod my-prometheus-pod calico-prometheus-access=true - ``` - - If you would like to grant access to a specific IP address in your network, you - can create a [global network set](../../reference/resources/globalnetworkset.mdx) using `calicoctl`. - - For example, creating the following network set would grant access to a host with IP 172.15.0.101. - - ```yaml - apiVersion: projectcalico.org/v3 - kind: GlobalNetworkSet - metadata: - name: calico-prometheus-set - labels: - calico-prometheus-access: 'true' - spec: - nets: - - 172.15.0.101/32 - ``` - -### Additional steps for Typha deployments - -If your {{prodname}} installation uses the Kubernetes API datastore and has greater than 50 nodes, it is likely -that you have installed Typha. This section shows how to use an additional network policy to secure the Typha -Prometheus endpoints. - -After following the steps above, create a file named `typha-prometheus-policy.yaml` with the following contents. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: restrict-typha-prometheus -spec: - # Select all {{prodname}} nodes. - selector: running-calico == "true" - order: 500 - types: - - Ingress - ingress: - - action: Allow - protocol: TCP - source: - selector: calico-prometheus-access == "true" - destination: - ports: - - 9093 -``` - -This policy selects all endpoints that have the label `running-calico: true`, and enforces a single ingress allow rule. -The ingress rule allows traffic to port 9093 from any source with the label `calico-prometheus-access: true`, meaning -all {{prodname}} workload endpoints, host endpoints, and global network sets that have the label will be allowed access. - -Then, use `calicoctl` to apply this policy. - -```bash -calicoctl apply -f typha-prometheus-policy.yaml -``` - -### Example for kube-controllers - -If your {{prodname}} installation exposes metrics from kube-controllers, you can limit access to those metrics -with the following network policy. - -Create a file named `kube-controllers-prometheus-policy.yaml` with the following contents. 
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: restrict-kube-controllers-prometheus
-  namespace: calico-system
-spec:
-  selector: k8s-app == "calico-kube-controllers"
-  order: 500
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        selector: calico-prometheus-access == "true"
-      destination:
-        ports:
-          - 9094
-```
-
-Then, use `calicoctl` to apply this policy.
-
-```bash
-calicoctl apply -f kube-controllers-prometheus-policy.yaml
-```
diff --git a/calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx b/calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx
deleted file mode 100644
index 5b43b0b9e8..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/encrypt-cluster-pod-traffic.mdx
+++ /dev/null
@@ -1,263 +0,0 @@
----
-description: Enable WireGuard for state-of-the-art cryptographic security between pods for Calico clusters.
----
-
-# Encrypt in-cluster pod traffic
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Enable WireGuard to secure on-the-wire, in-cluster pod traffic in a {{prodname}} cluster.
-
-## Value
-
-When this feature is enabled, {{prodname}} automatically creates and manages WireGuard tunnels between nodes providing transport-level security for on-the-wire, in-cluster pod traffic. WireGuard provides [formally verified](https://www.wireguard.com/formal-verification/) secure and [performant tunnels](https://www.wireguard.com/performance/) without any specialized hardware. For a deep dive into the WireGuard implementation, see this [white paper](https://www.wireguard.com/papers/wireguard.pdf).
-
-{{prodname}} supports WireGuard encryption for both IPv4 and IPv6 traffic. These can be independently enabled in the FelixConfiguration resource: `wireguardEnabled`
-enables encrypting IPv4 traffic over an IPv4 underlay network and `wireguardEnabledV6` enables encrypting IPv6 traffic over an IPv6 underlay network.
-
-## Before you begin...
-
-**Terminology**
-
-- Inter-node pod traffic: traffic leaving a pod on one node destined to a pod on another node
-- Inter-node, host-network traffic: traffic generated by the node itself or a host-networked pod destined to another node or host-networked pod
-- Same-node pod traffic: traffic between pods on the same node
-
-**Supported platforms**
-
-- Kubernetes, on-premises
-- EKS using Calico CNI
-- EKS using AWS CNI
-- AKS using Azure CNI
-
-**Supported encryption**
-
-- Encryption for inter-node pod traffic
-- Encryption for inter-node, host-network traffic - supported only on managed clusters deployed on EKS and AKS
-
-**Required**
-
-- On all nodes in the cluster that you want to participate in {{prodname}} encryption, verify that the operating system(s) on the nodes are [installed with WireGuard](https://www.wireguard.com/install/).
-
-  :::note
-
-  Some node operating systems do not support WireGuard, or do not have it installed by default. Enabling {{prodname}} WireGuard encryption does not require all nodes to be installed with WireGuard. However, traffic to or from a node that does not have WireGuard installed will not be encrypted.
-
-  :::
-
-- IP addresses for every node in the cluster. This is required to establish secure tunnels between the nodes. {{prodname}} can automatically do this using [IP autodetection methods](../networking/ipam/ip-autodetection.mdx).
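-As a quick check before enabling encryption, you can confirm that a node's kernel has WireGuard available. This is a minimal sketch, assuming shell access to the node and that the `wireguard-tools` package (which provides the `wg` command) is installed:
-
-```bash
-# Try to load the WireGuard kernel module; this fails if the kernel lacks WireGuard support.
-sudo modprobe wireguard
-
-# Confirm the module is now loaded.
-lsmod | grep wireguard
-
-# Optionally, check that the userspace tooling is present.
-wg version
-```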
-
-## How to
-
-- [Install WireGuard](#install-wireguard)
-- [Enable WireGuard for a cluster](#enable-wireguard-for-a-cluster)
-- [Disable WireGuard for an individual node](#disable-wireguard-for-an-individual-node)
-- [Verify configuration](#verify-configuration)
-- [Disable WireGuard for a cluster](#disable-wireguard-for-a-cluster)
-
-### Install WireGuard
-
-WireGuard is included in Linux 5.6+ kernels, and has been backported to earlier Linux kernels in some Linux distributions.
-
-Install WireGuard on cluster nodes using the [instructions for your operating system](https://www.wireguard.com/install/). Note that you may need to reboot your nodes after installing WireGuard to make the kernel modules available on your system.
-
-For the following platforms, which are not listed on the WireGuard installation page, use these instructions before proceeding to [enabling WireGuard](#enable-wireguard-for-a-cluster).
-
-To install WireGuard on the default Amazon Machine Image (AMI):
-
-```bash
-sudo yum install kernel-devel-`uname -r` -y
-sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm -y
-sudo curl -o /etc/yum.repos.d/jdoss-wireguard-epel-7.repo https://copr.fedorainfracloud.org/coprs/jdoss/wireguard/repo/epel-7/jdoss-wireguard-epel-7.repo
-sudo yum install wireguard-dkms wireguard-tools -y
-```
-
-AKS cluster nodes run Ubuntu with a kernel that has WireGuard installed already, so there is no manual installation required.
-
-To install WireGuard for OpenShift v4.8:
-
-1. Install requirements:
-
-   - [CoreOS Butane](https://coreos.github.io/butane/getting-started/)
-   - [OpenShift CLI](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html)
-
-1. Download and configure the tools needed for kmods.
-
-   ```bash
-   FAKEROOT=$(mktemp -d)
-   git clone https://github.com/tigera/kmods-via-containers
-   cd kmods-via-containers
-   make install FAKEROOT=${FAKEROOT}
-   cd ..
-   git clone https://github.com/tigera/kvc-wireguard-kmod
-   cd kvc-wireguard-kmod
-   make install FAKEROOT=${FAKEROOT}
-   cd ..
-   ```
-
-1. Configure/edit `${FAKEROOT}/root/etc/kvc/wireguard-kmod.conf`.
-
-   a. Set the URLs for the `KERNEL_CORE_RPM`, `KERNEL_DEVEL_RPM` and `KERNEL_MODULES_RPM` packages in the conf file `$FAKEROOT/etc/kvc/wireguard-kmod.conf`. Obtain copies of the `kernel-core`, `kernel-devel`, and `kernel-modules` RPMs from [RedHat Access](https://access.redhat.com/downloads/content/package-browser) and host them on an HTTP file server that is reachable by your OCP workers.
-
-   b. For help configuring `kvc-wireguard-kmod/wireguard-kmod.conf` and WireGuard version to kernel version compatibility, see the [kvc-wireguard-kmod README file](https://github.com/tigera/kvc-wireguard-kmod#quick-config-variables-guide).
-
-1. Get RHEL Entitlement data from your own RHEL8 system, from a host in your cluster.
-
-   ```bash
-   tar -czf subs.tar.gz /etc/pki/entitlement/ /etc/rhsm/ /etc/yum.repos.d/redhat.repo
-   ```
-
-1. Copy the `subs.tar.gz` file to your workspace and then extract the contents using the following command.
-
-   ```bash
-   tar -x -C ${FAKEROOT}/root -f subs.tar.gz
-   ```
-
-1. Transpile your machine config using [CoreOS Butane](https://coreos.github.io/butane/getting-started/).
-
-   ```bash
-   cd kvc-wireguard-kmod
-   make ignition FAKEROOT=${FAKEROOT} > mc-wg.yaml
-   ```
-
-1. With the KUBECONFIG set for your cluster, run the following command to apply the MachineConfig, which will install WireGuard across your cluster.
-   ```bash
-   oc create -f mc-wg.yaml
-   ```
-
-### Enable WireGuard for a cluster
-
-Enable IPv4 WireGuard encryption across all the nodes using the following command.
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true}}'
-```
-
-Enable IPv6 WireGuard encryption across all the nodes using the following command.
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabledV6":true}}'
-```
-
-To enable both IPv4 and IPv6 WireGuard encryption across all the nodes, use the following command.
-
-```bash
-kubectl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
-```
-
-If you manage {{prodname}} with `calicoctl` instead of `kubectl`, the equivalent commands are as follows. Enable IPv4 WireGuard encryption across all the nodes:
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true}}'
-```
-
-Enable IPv6 WireGuard encryption across all the nodes:
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabledV6":true}}'
-```
-
-To enable both IPv4 and IPv6 WireGuard encryption across all the nodes:
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":true,"wireguardEnabledV6":true}}'
-```
-
-**Perform the next step for EKS and AKS clusters only, and only if your cluster is using the cloud provider CNI plugin and not Calico CNI.** Enable WireGuard encryption for direct node-to-node communications using the following command.
-
-```bash
-calicoctl patch felixconfiguration default --type='merge' -p '{"spec": {"wireguardHostEncryptionEnabled": true}}'
-```
-
-For OpenShift, add the Felix configuration with WireGuard enabled [under custom resources](../getting-started/kubernetes/openshift/installation.mdx#optionally-provide-additional-configuration).
-
-:::note
-
-The above commands can be used to change other WireGuard attributes. For a list of other WireGuard parameters and configuration evaluation, see the [Felix configuration](../reference/resources/felixconfig.mdx#felix-configuration-definition).
-
-:::
-
-:::note
-
-`natOutgoing: true` is set for the default IPv4 IP pool, but not for IPv6. WireGuard requires `natOutgoing` to be enabled for both IPv4 and IPv6, so [enable NAT outgoing for the IPv6 IP pools](../networking/configuring/workloads-outside-cluster.mdx) when using IPv6 WireGuard.
-
-:::
-
-We recommend that you review and modify the MTU used by {{prodname}} networking when WireGuard is enabled to increase network performance. Follow the instructions in the [Configure MTU to maximize network performance](../networking/configuring/mtu.mdx) guide to set the MTU to a value appropriate for your network.
-
-### Disable WireGuard for an individual node
-
-To disable WireGuard on a specific node with WireGuard installed, modify the node-specific Felix configuration. For example, to turn off encryption for pod traffic on node `my-node`, use the following command. This command disables WireGuard for both IPv4 and IPv6; modify it accordingly if disabling only one IP version:

-```bash
-cat <<EOF | calicoctl apply -f -
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: node.my-node
-spec:
-  wireguardEnabled: false
-  wireguardEnabledV6: false
-EOF
-```
-
-### Verify configuration
-
-To verify that WireGuard is active on a node, check the node status that Felix reports. For example:
-
-```bash
-calicoctl get node <node-name> -o yaml
-...
-status:
-  ...
-  wireguardPublicKey: jlkVyQYooZYzI2wFfNhSZez5eWh44yfq1wKVjLvSXgY=
-  wireguardPublicKeyV6: hTnWXGM4qk/Z8fQgyGFdpPd4qM9QGR2ey30s31yC6g4=
-  ...
-```
-
-### Disable WireGuard for a cluster
-
-To disable WireGuard on all nodes, modify the default Felix configuration.
For example: - -```bash - calicoctl patch felixconfiguration default --type='merge' -p '{"spec":{"wireguardEnabled":false,"wireguardEnabledV6":false}}' -``` - -## Additional resources - -- [Secure Calico component communications](comms/index.mdx) -- [Configure MTU to maximize network performance](../networking/configuring/mtu.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx b/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx deleted file mode 100644 index 8b3171d435..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/defend-dos-attack.mdx +++ /dev/null @@ -1,107 +0,0 @@ ---- -description: Define DoS mitigation rules in Calico policy to quickly drop connections when under attack. Learn how rules use eBPF and XDP, including hardware offload when available. ---- - -# Defend against DoS attacks - -## Big picture - -Calico automatically enforces specific types of deny-list policies at the earliest possible point in the packet processing pipeline, including offloading to NIC hardware whenever possible. - -## Value - -During a DoS attack, a cluster can receive massive numbers of connection requests from attackers. The faster these connection requests are dropped, the less flooding and overloading to your hosts. When you define DoS mitigation rules in Calico network policy, Calico enforces the rules as efficiently as possible to minimize the impact. - -## Concepts - -### Earliest packet processing - -The earliest point in the packet processing pipeline that packets can be dropped, depends on the Linux kernel version and the capabilities of the NIC driver and NIC hardware. Calico automatically uses the fastest available option. - -| Processed by... | Used by Calico if... | Performance | -| --------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------- | -| NIC hardware | The NIC supports **XDP offload** mode. | Fastest | -| NIC driver | The NIC driver supports **XDP native** mode. | Faster | -| Kernel | The kernel supports **XDP generic mode** and Calico is configured to explicitly use it. This mode is rarely used and has no performance benefits over iptables raw mode below. To enable, see [Felix Configuration](../../reference/resources/felixconfig.mdx). | Fast | -| Kernel | If none of the modes above are available, **iptables raw** mode is used. | Fast | - -:::note - -XDP modes require Linux kernel v4.16 or later. - -::: - -## How to - -The high-level steps to defend against a DoS attack are: - -- [Step 1: Create host endpoints](#step-1-create-host-endpoints) -- [Step 2: Add CIDRs to deny-list in a global network set](#step-2-add-cidrs-to-deny-list-in-a-global-network-set) -- [Step 3: Create deny incoming traffic global network policy](#step-3-create-deny-incoming-traffic-global-network-policy) - -### Best practice - -The following steps walk through the above required steps, assuming no prior configuration is in place. A best practice is to proactively do these steps before an attack (create the host endpoints, network policy, and global network set). In the event of a DoS attack, you can quickly respond by just adding the CIDRs that you want to deny-list to the global network set. 
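-For instance, once the resources from the steps below exist, responding to an attack can be a single command. This is a sketch: it assumes the `dos-mitigation` GlobalNetworkSet created in step 2, and `192.0.2.0/24` is a hypothetical, newly identified attacker range. Note that a merge patch replaces the whole `nets` list, so the existing CIDRs must be included as well.
-
-```bash
-calicoctl patch globalnetworkset dos-mitigation --type=merge --patch \
-  '{"spec":{"nets":["1.2.3.4/32","5.6.0.0/16","192.0.2.0/24"]}}'
-```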
-
-### Step 1: Create host endpoints
-
-First, you create the HostEndpoints corresponding to the network interfaces where you want to enforce DoS mitigation rules. In the following example, the HostEndpoint secures the interface named **eth0** with IP **10.0.0.1** on node **jasper**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: production-host
-  labels:
-    apply-dos-mitigation: 'true'
-spec:
-  interfaceName: eth0
-  node: jasper
-  expectedIPs: ['10.0.0.1']
-```
-
-### Step 2: Add CIDRs to deny-list in a global network set
-
-Next, you create a Calico **GlobalNetworkSet**, adding the CIDRs that you want to deny-list. In the following example, the global network set deny-lists the CIDR ranges **1.2.3.4/32** and **5.6.0.0/16**:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkSet
-metadata:
-  name: dos-mitigation
-  labels:
-    dos-deny-list: 'true'
-spec:
-  nets:
-    - '1.2.3.4/32'
-    - '5.6.0.0/16'
-```
-
-### Step 3: Create deny incoming traffic global network policy
-
-Finally, create a Calico GlobalNetworkPolicy that uses the GlobalNetworkSet label (**dos-deny-list** from the previous step) as a selector to deny ingress traffic. To more quickly enforce the denial of forwarded traffic to the host at the packet level, use the **doNotTrack** and **applyOnForward** options.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: dos-mitigation
-spec:
-  selector: apply-dos-mitigation == 'true'
-  doNotTrack: true
-  applyOnForward: true
-  types:
-    - Ingress
-  ingress:
-    - action: Deny
-      source:
-        selector: dos-deny-list == 'true'
-```
-
-## Additional resources
-
-- [Global network sets](../../reference/resources/globalnetworkset.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
-- [Create a host endpoint](../../reference/resources/hostendpoint.mdx)
-- [Introduction to XDP](https://www.iovisor.org/technology/xdp)
-- [Advanced XDP documentation](https://prototype-kernel.readthedocs.io/en/latest/networking/XDP/index.html)
diff --git a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx b/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx
deleted file mode 100644
index d005bd9100..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/high-connection-workloads.mdx
+++ /dev/null
@@ -1,89 +0,0 @@
----
-description: Create a Calico network policy rule to bypass Linux conntrack for traffic to workloads that experience an extremely large number of connections.
----
-
-# Enable extreme high-connection workloads
-
-## Big picture
-
-Use a {{prodname}} network policy rule to bypass Linux conntrack for traffic to workloads that experience an extremely large number of connections.
-
-## Value
-
-When the number of connections on a node exceeds the number of connections that Linux conntrack can track, connections can be rejected or dropped. {{prodname}} network policy can be used to selectively bypass Linux conntrack for traffic to/from these types of workloads.
-
-## Concepts
-
-### Linux conntrack
-
-Connection tracking (“conntrack”) is a core feature of the Linux kernel’s networking stack. It allows the kernel to keep track of all logical network connections or flows, and thereby identify all of the packets that make up each flow so they can be handled consistently together.
Conntrack is an essential part of the mainline Linux network processing pipeline, normally improving performance, and enabling NAT and stateful access control.
-
-### Extreme high-connection workloads
-
-Some niche workloads handling an extremely high number of simultaneous connections, or a very high rate of short-lived connections, can exceed the maximum number of connections Linux conntrack is able to track. One real-world example of such a workload is an extreme scale memcached server handling 50k+ connections per second.
-
-### {{prodname}} doNotTrack network policy
-
-The {{prodname}} global network policy option, **doNotTrack**, indicates that the rules in the policy should be applied before connection tracking, and that packets allowed by these rules should not be tracked. The policy is applied early in the Linux packet processing pipeline, before any regular network policy rules, and independent of the policy order field.
-
-Unlike normal network policy rules, doNotTrack network policy rules are stateless, meaning you must explicitly specify rules to allow return traffic that would normally be automatically allowed by conntrack. For example, for a server on port 999, the policy must include an ingress rule allowing inbound traffic to port 999, and an egress rule to allow outbound traffic from port 999.
-
-In a doNotTrack policy:
-
-- Ingress rules apply to all incoming traffic through a host endpoint, regardless of where the traffic is going
-- Egress rules apply only to traffic that is sent from the host endpoint (not a local workload)
-
-Finally, you must add an **applyOnForward: true** expression for a **doNotTrack** policy to work.
-
-## Before you begin...
-
-Before creating a **doNotTrack** network policy, read this [blog](https://www.tigera.io/blog/when-linux-conntrack-is-no-longer-your-friend/) to understand use cases, benefits, and trade-offs.
-
-## How to
-
-### Bypass connection tracking for a high-connection server
-
-In the following example, a memcached server pod with **hostNetwork: true** was scheduled on the node memcached-node-1. We create a HostEndpoint for the node. Next, we create a GlobalNetworkPolicy with symmetrical ingress and egress rules, with doNotTrack and applyOnForward set to true.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: HostEndpoint
-metadata:
-  name: memcached-node-1-eth0
-  labels:
-    memcached: server
-spec:
-  interfaceName: eth0
-  node: memcached-node-1
-  expectedIPs:
-    - 10.128.0.162
----
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: memcached-server
-spec:
-  selector: memcached == 'server'
-  applyOnForward: true
-  doNotTrack: true
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        selector: memcached == 'client'
-      destination:
-        ports:
-          - 12211
-  egress:
-    - action: Allow
-      protocol: TCP
-      source:
-        ports:
-          - 12211
-      destination:
-        selector: memcached == 'client'
-```
-
-## Additional resources
-
-[Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx b/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx
deleted file mode 100644
index 65e6316852..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/extreme-traffic/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Use Calico network policy early in the Linux packet processing pipeline to handle extreme traffic scenarios.
-hide_table_of_contents: true
----
-
-# Policy for extreme traffic
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx
deleted file mode 100644
index 0dd6d5b625..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-labels.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-description: Calico automatic labels for use with resources.
----
-
-# Calico automatic labels
-
-As a convenience, {{prodname}} provides immutable labels that are used for specific resources when evaluating selectors in policies. The labels make it easier to match resources in common ways (such as matching a namespace by name).
-
-## Labels for matching namespaces
-
-The label `projectcalico.org/name` is set to the name of the namespace. This allows for matching namespaces by name when using a `namespaceSelector` field.
-
-For example, the following GlobalNetworkPolicy applies to workloads with the label `color: red` in namespaces named `foo` and `bar`. The policy allows ingress traffic to port 8080 from all workloads in a third namespace named `baz`:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: foo-and-bar
-spec:
-  namespaceSelector: projectcalico.org/name in {"foo", "bar"}
-  selector: color == "red"
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      source:
-        namespaceSelector: projectcalico.org/name == "baz"
-      destination:
-        ports:
-          - 8080
-```
-
-Be aware that the default values for `namespaceSelector` for NetworkPolicy and GlobalNetworkPolicy are different. For example:
-
-**In a network policy**,
-
-```yaml
-namespaceSelector:
-selector: foo == "bar"
-```
-
-means "resources in the same namespace as the network policy that match foo == 'bar'".
-
-**In a global network policy**,
-
-```yaml
-namespaceSelector:
-selector: foo == "bar"
-```
-
-means "resources in any namespace and non-namespaced resources that match foo == 'bar'".
-
-Further,
-
-```yaml
-namespaceSelector: projectcalico.org/name == "some-namespace"
-selector: foo == "bar"
-```
-
-is equivalent to:
-
-```yaml
-namespaceSelector:
-selector: (foo == "bar") && (projectcalico.org/namespace == "some-namespace")
-```
-
-### Labels for matching service accounts
-
-Similarly, the `projectcalico.org/name` label is applied to ServiceAccounts and allows for matching by name in a `serviceAccountSelector`.
-
-### Kubernetes labels for matching namespaces
-
-Kubernetes also has [automatic labelling](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#automatic-labelling), for example `kubernetes.io/metadata.name`. The Kubernetes namespace label serves the same purpose and can be used in the same way as the {{prodname}} label. The `projectcalico.org/name` label predates the automatic Kubernetes label.
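-For example (a small illustrative sketch), the ingress rule in the policy above could equivalently match the namespace using the Kubernetes automatic label:
-
-```yaml
-# Equivalent to namespaceSelector: projectcalico.org/name == "baz"
-namespaceSelector: kubernetes.io/metadata.name == "baz"
-```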
-
-## Labels for matching workload endpoints
-
-WorkloadEndpoints (which represent Pods in Kubernetes, or VM instances in OpenStack) receive several automatic labels:
-
-* `projectcalico.org/orchestrator` is applied to all WorkloadEndpoints and allows Kubernetes Pods to be distinguished from OpenStack VM instances, and from HostEndpoints (which do not have the label):
-
-  * `has(projectcalico.org/orchestrator)` matches only WorkloadEndpoints
-  * `projectcalico.org/orchestrator == "k8s"` matches only Kubernetes Pods
-
-* For WorkloadEndpoints that represent Kubernetes Pods, `projectcalico.org/namespace` contains the name of the pod's namespace. `projectcalico.org/namespace` predates the addition of `namespaceSelector` fields to GlobalNetworkPolicies; it serves the same purpose as the `projectcalico.org/name` label in a `namespaceSelector` field. The following GlobalNetworkPolicy is exactly equivalent to the example shown in the Namespaces section:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: foo-and-bar
-spec:
-  selector: projectcalico.org/namespace in {"foo", "bar"} && color == "red"
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      source:
-        selector: projectcalico.org/namespace == "baz"
-      destination:
-        ports:
-          - 8080
-```
-
-## Use the correct selector with labels in policies
-
-{{prodname}} labels must be used with the correct selector or the policy will not work as designed (and there are no error messages in Manager UI or when applying the YAML).
-
-| Calico label | Usage requirements | Use in these resources... |
-| ------------ | ------------------ | ------------------------- |
-| `projectcalico.org/name` | Use with a **namespaceSelector** or **serviceAccountSelector**. | - Network policy<br/>- Staged network policy<br/><br/>Namespaced resources that apply only to workload endpoint resources in the namespace. |
-| `projectcalico.org/namespace` | Use only with selectors.<br/><br/>Use the label as the label name, and a namespace name as the value to compare against (for example, `projectcalico.org/namespace == "default"`). | - Global network policy<br/>- Staged global network policy<br/><br/>Cluster-wide (non-namespaced) resources that apply to workload endpoint resources in all namespaces, and to host endpoint resources. |
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx
deleted file mode 100644
index a99d1a64a4..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-network-policy.mdx
+++ /dev/null
@@ -1,258 +0,0 @@
----
-description: Create your first Calico network policies. Shows the rich features using sample policies that extend native Kubernetes network policy.
----
-
-# Get started with Calico network policy
-
-## Big picture
-
-Enforce which network traffic is allowed or denied using rules in Calico network policy.
-
-## Value
-
-### Extends Kubernetes network policy
-
-Calico network policy provides a richer set of policy capabilities than Kubernetes, including policy ordering/priority, deny rules, and more flexible match rules. While Kubernetes network policy applies only to pods, Calico network policy can be applied to multiple types of endpoints including pods, VMs, and host interfaces. Finally, when used with the Istio service mesh, Calico network policy supports securing applications with layer 5-7 match criteria, and cryptographic identity.
-
-### Write once, works everywhere
-
-No matter which cloud provider you use now, adopting Calico network policy means you write the policy once and it is portable. If you move to a different cloud provider, you don’t need to rewrite your Calico network policy. Calico network policy is a key feature to avoid cloud provider lock-in.
-
-### Works seamlessly with Kubernetes network policies
-
-You can use Calico network policy in addition to Kubernetes network policy, or exclusively. For example, you could allow developers to define Kubernetes network policy for their microservices. For broader and higher-level access controls that developers cannot override, you could allow only security or Ops teams to define Calico network policies.
-
-## Concepts
-
-### Endpoints
-
-Calico network policies apply to **endpoints**. In Kubernetes, each pod is a Calico endpoint. However, Calico can support other kinds of endpoints. There are two types of Calico endpoints: **workload endpoints** (such as a Kubernetes pod or OpenStack VM) and **host endpoints** (an interface or group of interfaces on a host).
-
-### Namespaced and global network policies
-
-**Calico network policy** is a namespaced resource that applies to pods/containers/VMs in that namespace.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: NetworkPolicy
-metadata:
-  name: allow-tcp-6379
-  namespace: production
-```
-
-**Calico global network policy** is a non-namespaced resource and can be applied to any kind of endpoint (pods, VMs, host interfaces) independent of namespace.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-tcp-port-6379
-```
-
-Because global network policies use **kind: GlobalNetworkPolicy**, they are grouped separately from **kind: NetworkPolicy**. For example, global network policies will not be returned from `calicoctl get networkpolicy`, and are instead returned from `calicoctl get globalnetworkpolicy`.
-
-### kubectl vs calicoctl
-
-Calico network policies and Calico global network policies are applied using calicoctl. Syntax is similar to Kubernetes, but there are a few differences.
### Ingress and egress

Each network policy rule applies to either **ingress** or **egress** traffic. From the point of view of an endpoint (pod, VM, host interface), **ingress** is incoming traffic to the endpoint, and **egress** is outgoing traffic from the endpoint. In a Calico network policy, you create ingress and egress rules independently (egress, ingress, or both).

You can specify whether a policy applies to ingress, egress, or both using the **types** field. If you do not use the types field, Calico defaults to the following values.

| Ingress rule present? | Egress rule present? | Value           |
| :-------------------: | :------------------: | :-------------: |
|          No           |          No          |     Ingress     |
|          Yes          |          No          |     Ingress     |
|          No           |         Yes          |     Egress      |
|          Yes          |         Yes          | Ingress, Egress |

### Network traffic behaviors: deny and allow

The Kubernetes network policy specification defines the following behavior:

- If no network policies apply to a pod, then all traffic to/from that pod is allowed.
- If one or more network policies apply to a pod containing ingress rules, then only the ingress traffic specifically allowed by those policies is allowed.
- If one or more network policies apply to a pod containing egress rules, then only the egress traffic specifically allowed by those policies is allowed.

For compatibility with Kubernetes, **Calico network policy** follows the same behavior for Kubernetes pods. For other endpoint types (VMs, host interfaces), Calico network policy is default deny. That is, only traffic specifically allowed by network policy is allowed, even if no network policies apply to the endpoint.

## Before you begin

`calicoctl` must be **installed** and **configured** before use. `calicoctl` uses etcd as the datastore by default, but many {{prodname}} installation manifests configure Kubernetes as the datastore. You can find more information on how to configure `calicoctl` at the following link:

- [Configure `calicoctl`](../../../operations/calicoctl/configure/overview.mdx)

## How to

- [Control traffic to/from endpoints in a namespace](#control-traffic-tofrom-endpoints-in-a-namespace)
- [Control traffic to/from endpoints independent of namespace](#control-traffic-tofrom-endpoints-independent-of-namespace)
- [Control traffic to/from endpoints using IP addresses or CIDR ranges](#control-traffic-tofrom-endpoints-using-ip-addresses-or-cidr-ranges)
- [Apply network policies in specific order](#apply-network-policies-in-specific-order)
- [Generate logs for specific traffic](#generate-logs-for-specific-traffic)

### Control traffic to/from endpoints in a namespace

In the following example, ingress traffic to endpoints in the **namespace: production** with label **color: red** is allowed only if it comes from a pod in the same namespace with **color: blue**, on port **6379**.

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-tcp-6379
  namespace: production
spec:
  selector: color == 'red'
  ingress:
    - action: Allow
      protocol: TCP
      source:
        selector: color == 'blue'
      destination:
        ports:
          - 6379
```

To allow ingress traffic from endpoints in other namespaces, use a **namespaceSelector** in the policy rule. A namespaceSelector matches namespaces based on the labels that are applied to the namespace. In the following example, ingress traffic is allowed from endpoints in namespaces that match **shape == circle**.
```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-tcp-6379
  namespace: production
spec:
  selector: color == 'red'
  ingress:
    - action: Allow
      protocol: TCP
      source:
        selector: color == 'blue'
        namespaceSelector: shape == 'circle'
      destination:
        ports:
          - 6379
```

### Control traffic to/from endpoints independent of namespace

The following Calico network policy is similar to the previous example, but uses **kind: GlobalNetworkPolicy** so it applies to all endpoints, regardless of namespace.

In the following example, incoming TCP traffic to any pods with label **color: red** is denied if it comes from a pod with **color: blue**.

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: deny-blue
spec:
  selector: color == 'red'
  ingress:
    - action: Deny
      protocol: TCP
      source:
        selector: color == 'blue'
```

As with **kind: NetworkPolicy**, you can allow or deny ingress traffic from endpoints in specific namespaces using a namespaceSelector in the policy rule:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: deny-circle-blue
spec:
  selector: color == 'red'
  ingress:
    - action: Deny
      protocol: TCP
      source:
        selector: color == 'blue'
        namespaceSelector: shape == 'circle'
```

### Control traffic to/from endpoints using IP addresses or CIDR ranges

Instead of using a selector to define which traffic is allowed to/from the endpoints in a network policy, you can also specify an IP block in CIDR notation.

In the following example, outgoing traffic is allowed from pods with the label **color: red** if it goes to an IP address in the **1.2.3.0/24** CIDR block.

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-egress-external
  namespace: production
spec:
  selector: color == 'red'
  types:
    - Egress
  egress:
    - action: Allow
      destination:
        nets:
          - 1.2.3.0/24
```

### Apply network policies in specific order

To control the order/sequence of applying network policies, you can use the **order** field (with precedence from the lowest value to highest). Defining policy **order** is important when you include both **action: Allow** and **action: Deny** rules that may apply to the same endpoint.

In the following example, the policy **allow-cluster-internal-ingress** (order: 10) is applied before the policy **drop-other-ingress** (order: 20).

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: drop-other-ingress
spec:
  order: 20
  #...deny policy rules here...
```

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-cluster-internal-ingress
spec:
  order: 10
  #...allow policy rules here...
```

### Generate logs for specific traffic

In the following example, incoming TCP traffic to an application is denied, and each connection attempt is logged to syslog.

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-tcp-6379
  namespace: production
spec:
  selector: role == 'database'
  types:
    - Ingress
    - Egress
  ingress:
    - action: Log
      protocol: TCP
      source:
        selector: role == 'frontend'
    - action: Deny
      protocol: TCP
      source:
        selector: role == 'frontend'
```

### Create policy for established connections

Policies are immediately applied to any new connections.
However, for existing connections that are already open, policy changes only take effect after the connection is closed and reestablished. This means that ongoing sessions may not reflect policy changes until they are initiated again.

## Additional resources

- For additional Calico network policy features, see [Calico network policy](../../../reference/resources/networkpolicy.mdx) and [Calico global network policy](../../../reference/resources/globalnetworkpolicy.mdx)
- For an alternative to using IP addresses or CIDRs in policy, see [Network sets](../../../reference/resources/networkset.mdx)
- For details on the calicoctl command line tool, see [calicoctl user reference](../../../reference/calicoctl/overview.mdx)

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx
deleted file mode 100644
index 0fc30bf6c0..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/calico-policy-tutorial.mdx
+++ /dev/null
@@ -1,219 +0,0 @@
---
description: Learn how to create more advanced Calico network policies (namespace, allow and deny all ingress and egress).
---

# Calico policy tutorial

Calico network policies **extend** the functionality of Kubernetes network policies. To demonstrate this, this tutorial follows a similar approach to the [Kubernetes Advanced Network Policy Tutorial](../kubernetes-policy/kubernetes-policy-advanced.mdx), but instead uses Calico network policies, highlighting the differences between the two policy types and making use of features that are not available in Kubernetes network policies.

## Requirements

- A working Kubernetes cluster and access to it using kubectl and calicoctl
- Your Kubernetes nodes have connectivity to the public internet
- You are familiar with [Calico NetworkPolicy](calico-network-policy.mdx)

## Tutorial flow

1. Create the namespace and NGINX service
2. Configure default deny
3. Allow egress traffic from busybox
4. Allow ingress traffic to NGINX
5. Clean up

## 1. Create the namespace and nginx service

We'll use a new namespace for this guide. Run the following commands to create the namespace and a plain NGINX service listening on port 80.

```bash
kubectl create ns advanced-policy-demo
kubectl create deployment --namespace=advanced-policy-demo nginx --image=nginx
kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80
```

### Verify access - allowed all ingress and egress

Open up a second shell session which has `kubectl` connectivity to the Kubernetes cluster and create a busybox pod to test policy access. This pod will be used throughout this tutorial to test policy access.

```bash
kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh
```

This will open up a shell session inside the `busybox` pod, as shown below.

```
Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false

If you don't see a command prompt, try pressing enter.
/ #
```

Now from within the busybox "access" pod, execute the following command to test access to the nginx service.

```bash
wget -q --timeout=5 nginx -O -
```

It returns the HTML of the nginx welcome page.

Still within the busybox "access" pod, issue the following command to test access to google.com.
```bash
wget -q --timeout=5 google.com -O -
```

It returns the HTML of the google.com home page.

## 2. Lock down all traffic

We will begin by using a default deny [Global Calico Network Policy](../../../reference/resources/globalnetworkpolicy.mdx) (which you can only do using Calico) that will help us adopt best practices in using a [zero trust network model](../../adopt-zero-trust.mdx) to secure our workloads. Note that Global Calico Network Policies are not namespaced and affect all pods that match the policy selector. In contrast, Kubernetes Network Policies are namespaced, so you would need to create a default deny policy per namespace to achieve the same effect. Note that to simplify this tutorial we exclude pods in the `kube-system`, `calico-system` and `calico-apiserver` namespaces, so we don't have to consider the policies required to keep Kubernetes itself running smoothly when we apply our default deny.

```bash
calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: default-deny
spec:
  selector: projectcalico.org/namespace not in {'kube-system', 'calico-system', 'calico-apiserver'}
EOF
```

## 3. Allow egress traffic from busybox

With the default deny in place, the busybox "access" pod can no longer reach anything. Create a Calico network policy named `allow-busybox-egress` in the `advanced-policy-demo` namespace that selects the busybox "access" pod and allows all of its egress traffic (see the sketch at the end of this tutorial).

## 4. Allow ingress traffic to NGINX

Next, create a Calico network policy named `allow-nginx-ingress` in the same namespace that allows ingress traffic to the nginx pods from the busybox "access" pod (also sketched below).

Test access to the nginx service from the busybox "access" pod again. It returns the nginx welcome page:

```
Welcome to nginx!...
```

Next, try to retrieve the home page of google.com.

```bash
wget -q --timeout=5 google.com -O -
```

It will return the HTML of the google home page.

We have allowed our access pod access to the outside internet and the nginx service using Calico network policies!

## 5. Clean up

To clean up this tutorial session, run the following commands to clean up the network policies and remove the demo namespace.

```bash
calicoctl delete policy allow-busybox-egress -n advanced-policy-demo
calicoctl delete policy allow-nginx-ingress -n advanced-policy-demo
calicoctl delete gnp default-deny
kubectl delete ns advanced-policy-demo
```
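For reference, policies with the effect described in steps 3 and 4 might look like the following sketches. The selectors are assumptions based on the pod labels that `kubectl run` (`run=access`) and `kubectl create deployment` (`app=nginx`) apply; these are not the tutorial's exact manifests:

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-busybox-egress
  namespace: advanced-policy-demo
spec:
  # Selects the busybox pod created with "kubectl run access" (label run=access)
  selector: run == 'access'
  types:
    - Egress
  egress:
    - action: Allow
---
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-nginx-ingress
  namespace: advanced-policy-demo
spec:
  # Selects the nginx pods created with "kubectl create deployment nginx" (label app=nginx)
  selector: app == 'nginx'
  types:
    - Ingress
  ingress:
    - action: Allow
      source:
        selector: run == 'access'
```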
diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx
deleted file mode 100644
index fce333b50a..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
---
description: Calico network policy lets you secure both workloads and hosts.
hide_table_of_contents: true
---

# Calico policy

import DocCardList from '@theme/DocCardList';
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';

<DocCardList items={useCurrentSidebarCategory().items} />

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx
deleted file mode 100644
index c30d615136..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/calico-policy/network-policy-openstack.mdx
+++ /dev/null
@@ -1,99 +0,0 @@
---
description: Extend OpenStack security groups by applying Calico network policy and using labels to identify VMs within network policy rules.
---

# Get started with Calico network policy for OpenStack

## Big picture

Use {{prodname}} network policy to extend security beyond OpenStack security groups.

## Value

For **deployment users**, OpenStack security groups provide enough features and flexibility. But for **deployment administrators**, limited labeling in VM security groups makes it difficult to address all security use cases that arise. {{prodname}} network policy provides special VM labels so you can identify VMs and impose additional restrictions that cannot be bypassed by users' security group configuration.

## Concepts

### Multi-region deployments

Using the OpenStack API, it is difficult to apply policy to cross-region network traffic because security groups are local to a single region. In {{prodname}}, each region in your OpenStack deployment becomes a separate {{prodname}} namespace in a single etcd datastore. With regions mapped to namespaces, you can easily define {{prodname}} network policy for communications between VMs in different regions.

### Labels: more flexibility, greater security

{{prodname}} provides predefined [VM endpoint labels](../../../networking/openstack/labels.mdx) (projects, security groups, and namespaces) for OpenStack deployments. You can use these labels in selector fields in {{prodname}} network policy to identify the VMs for allow/deny policy.

### Policy ordering and enforcement

{{prodname}} network policy is always enforced before OpenStack security groups, and cannot be overridden by user-level security group configuration.

## Before you begin...

- [Set up {{prodname}} for OpenStack](../../../networking/openstack/dev-machine-setup.mdx)
- If you are using a multi-region VM deployment, [follow these extra steps](../../../networking/openstack/multiple-regions.mdx)

## How to

- [Restrict all ingress traffic between specific security groups](#restrict-all-ingress-traffic-between-specific-security-groups)
- [Allow specific traffic between VMs in different regions](#allow-specific-traffic-between-vms-in-different-regions)

### Restrict all ingress traffic between specific security groups

In the following example, we create a **GlobalNetworkPolicy** that is applied before any OpenStack security group policy. It prevents all ingress communication between the OpenStack **superman** and **lexluthor** projects. We use the predefined {{prodname}} VM endpoint label **openstack-project-name** to identify projects.

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: deny-lexluthor-to-superman
spec:
  order: 10
  selector: "projectcalico.org/openstack-project-name == 'superman'"
  types:
    - Ingress
  ingress:
    - action: Deny
      source:
        selector: "projectcalico.org/openstack-project-name == 'lexluthor'"
---
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: deny-superman-to-lexluthor
spec:
  order: 10
  selector: "projectcalico.org/openstack-project-name == 'lexluthor'"
  types:
    - Ingress
  ingress:
    - action: Deny
      source:
        selector: "projectcalico.org/openstack-project-name == 'superman'"
```

### Allow specific traffic between VMs in different regions

In the following example, we use the predefined VM endpoint label **openstack-security_group_ID**. Traffic is allowed to VMs with the label **openstack-a773…**, on port 80, from VMs in any region with the label **openstack-85cc…**.
```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-tcp-80
spec:
  selector: 'has(sg.projectcalico.org/openstack-a7734e61-b545-452d-a3cd-0189cbd9747a)'
  types:
    - Ingress
  ingress:
    - action: Allow
      protocol: TCP
      source:
        selector: 'has(sg.projectcalico.org/openstack-85cc3048-abc3-43cc-89b3-377341426ac5)'
      destination:
        ports:
          - 80
```

## Additional resources

- For additional {{prodname}} network policy features, see [{{prodname}} network policy](../../../reference/resources/networkpolicy.mdx) and [Calico global network policy](../../../reference/resources/globalnetworkpolicy.mdx)
- For details on the OpenStack integration with {{prodname}}, see [{{prodname}} for OpenStack](../../../networking/openstack/dev-machine-setup.mdx)

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx
deleted file mode 100644
index 7626a83757..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
---
description: If you are new to Kubernetes, start with "Kubernetes policy" and learn the basics of enforcing policy for pod traffic. Otherwise, dive in and create more powerful policies with Calico policy. The good news is, Kubernetes and Calico policies are very similar and work alongside each other -- so managing both types is easy.
hide_table_of_contents: true
---

# Get started with policy

import DocCardList from '@theme/DocCardList';
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';

<DocCardList items={useCurrentSidebarCategory().items} />

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx
deleted file mode 100644
index 0b9d2384e8..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-default-deny.mdx
+++ /dev/null
@@ -1,151 +0,0 @@
---
description: Create a default deny network policy so pods that are missing policy are not allowed traffic until appropriate network policy is defined.
---

# Enable a default deny policy for Kubernetes pods

## Big picture

Enable a default deny policy for Kubernetes pods using Kubernetes or {{prodname}} network policy.

## Value

A **default deny** network policy provides an enhanced security posture so pods without policy (or incorrect policy) are not allowed traffic until appropriate network policy is defined.

## Features

This how-to guide uses the following {{prodname}} features:

- **NetworkPolicy**
- **GlobalNetworkPolicy**

## Concepts

### Default deny/allow behavior

**Default allow** means all traffic is allowed by default, unless otherwise specified. **Default deny** means all traffic is denied by default, unless explicitly allowed. **Kubernetes pods are default allow**, unless network policy is defined to specify otherwise.

For compatibility with Kubernetes, **{{prodname}} network policy** enforcement follows the standard convention for Kubernetes pods:

- If no network policies apply to a pod, then all traffic to/from that pod is allowed.
- If one or more network policies apply to a pod with type ingress, then only the ingress traffic specifically allowed by those policies is allowed.
- If one or more network policies apply to a pod with type egress, then only the egress traffic specifically allowed by those policies is allowed.
For other endpoint types (VMs, host interfaces), the default behavior is to deny traffic. Only traffic specifically allowed by network policy is allowed, even if no network policies apply to the endpoint.

## Before you begin

To apply the sample {{prodname}} network policies in the following section, [install calicoctl](../../operations/calicoctl/install.mdx).

## How to

- [Create a default deny network policy](#create-a-default-deny-network-policy)
- [Create a global default deny policy](#create-a-global-default-deny-policy)

### Create a default deny network policy

Immediately after installation, a best practice is to create a namespaced default deny network policy to secure pods without policy or with incorrect policy until you can put policies in place and test them.

In the following example, we create a {{prodname}} default deny **NetworkPolicy** for all workloads in the namespace, **engineering**.

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: default-deny
  namespace: engineering
spec:
  selector: all()
  types:
    - Ingress
    - Egress
```

Here's an equivalent default deny **Kubernetes network policy** for all pods in the namespace, **engineering**:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny
  namespace: engineering
spec:
  podSelector: {}
  policyTypes:
    - Ingress
    - Egress
```

### Create a global default deny policy

A default deny policy ensures that unwanted traffic (ingress and egress) is denied by default, without you having to remember the default deny/allow behavior of Kubernetes and {{prodname}} policies. This policy can also help mitigate the risk of lateral malicious attacks.

#### Best practice #1: Allow, stage, then deny

We recommend that you create a global default deny policy after you complete writing policy for the traffic that you want to allow. The following steps summarize the best practice to test and lock down the cluster to block unwanted traffic:

1. Create a global default deny policy and test it in a staging environment. (The policy will show all the traffic that would be blocked if it were converted into a deny.)
1. Create network policies to individually allow the traffic shown as blocked in step 1 until no connections are denied.
1. Enforce the global default deny policy.

#### Best practice #2: Keep the scope to non-system pods

A global default deny policy applies to the entire cluster: all workloads in all namespaces, and hosts (computers that run the hypervisor for VMs or the container runtime for containers), including the Kubernetes control plane and the {{prodname}} control plane nodes and pods.

For this reason, the best practice is to create a global default deny policy for **non-system pods**, as shown in the following example.

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: deny-app-policy
spec:
  namespaceSelector: has(projectcalico.org/name) && projectcalico.org/name not in {"kube-system", "calico-system", "tigera-system"}
  types:
    - Ingress
    - Egress
  egress:
    # allow all namespaces to communicate to DNS pods
    - action: Allow
      protocol: UDP
      destination:
        selector: 'k8s-app == "kube-dns"'
        ports:
          - 53
    - action: Allow
      protocol: TCP
      destination:
        selector: 'k8s-app == "kube-dns"'
        ports:
          - 53
```

Note the following:
- Even though we call this policy "global default deny", the above policy is not explicitly denying traffic. By selecting the traffic with the `namespaceSelector` but not specifying an allow, the traffic is denied after all other policy is evaluated. This design also makes it unnecessary to ensure any specific order (priority) for the default-deny policy.
- Allowing access to `kube-dns` simplifies per-pod policies because you don't need to duplicate the DNS rules in every policy.
- The policy deliberately excludes the `kube-system`, `calico-system`, and `tigera-system` namespaces by using a negative `namespaceSelector` to avoid impacting any control plane components.

In a staging environment, verify that the policy does not block any necessary traffic before enforcing it.

### Don't try this!

The following policy works and looks fine on the surface. But as described in Best practice #2, the policy is too broad in scope and could break your cluster. Therefore, we do not recommend adding this type of policy, even if you have verified allowed traffic in your staging environment.

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: default.default-deny
spec:
  tier: default
  selector: all()
  types:
    - Ingress
    - Egress
```

## Additional resources

- [Network policy](../../reference/resources/networkpolicy.mdx)
- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
\ No newline at end of file

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx
deleted file mode 100644
index f2cfe5b06b..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
---
description: Manage your Kubernetes network policies right alongside the more powerful Calico network policies.
hide_table_of_contents: true
---

# Kubernetes policy

import DocCardList from '@theme/DocCardList';
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';

<DocCardList items={useCurrentSidebarCategory().items} />

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx
deleted file mode 100644
index f3d32f518c..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
---
description: An interactive demo that visually shows how applying Kubernetes policy allows and denies connections.
---

# Kubernetes policy, demo

The included demo sets up a frontend and backend service, as well as a client service, all running on Kubernetes. It then configures network policy on each service.

## Prerequisites

To create a Kubernetes cluster which supports the Kubernetes network policy API, follow one of our [getting started guides](../../../getting-started/index.mdx).

## Running the stars example

### 1) Create the frontend, backend, client, and management-ui apps

```shell
kubectl create -f {{tutorialFilesURL}}/00-namespace.yaml
kubectl create -f {{tutorialFilesURL}}/01-management-ui.yaml
kubectl create -f {{tutorialFilesURL}}/02-backend.yaml
kubectl create -f {{tutorialFilesURL}}/03-frontend.yaml
kubectl create -f {{tutorialFilesURL}}/04-client.yaml
```

Wait for all the pods to enter `Running` state.
```bash
kubectl get pods --all-namespaces --watch
```

> Note that it may take several minutes to download the necessary Docker images for this demo.

The management UI runs as a `NodePort` Service on Kubernetes, and shows the connectivity of the Services in this example.

You can view the UI by visiting `http://<node-ip>:30002` in a browser.

Once all the pods are started, they should have full connectivity. You can see this by visiting the UI. Each service is represented by a single node in the graph.

- `backend` -> Node "B"
- `frontend` -> Node "F"
- `client` -> Node "C"

### 2) Enable isolation

Running the following commands will prevent all access to the frontend, backend, and client Services.

```shell
kubectl create -n stars -f {{tutorialFilesURL}}/default-deny.yaml
kubectl create -n client -f {{tutorialFilesURL}}/default-deny.yaml
```

#### Confirm isolation

Refresh the management UI (it may take up to 10 seconds for changes to be reflected in the UI). Now that we've enabled isolation, the UI can no longer access the pods, and so they will no longer show up in the UI.

### 3) Allow the UI to access the services using network policy objects

Apply the following YAML files to allow access from the management UI.

```shell
kubectl create -f {{tutorialFilesURL}}/allow-ui.yaml
kubectl create -f {{tutorialFilesURL}}/allow-ui-client.yaml
```

After a few seconds, refresh the UI - it should now show the Services, but they should not be able to access each other anymore.

### 4) Create the backend-policy.yaml file to allow traffic from the frontend to the backend

```shell
kubectl create -f {{tutorialFilesURL}}/backend-policy.yaml
```

Refresh the UI. You should see the following:

- The frontend can now access the backend (on TCP port 6379 only).
- The backend cannot access the frontend at all.
- The client cannot access the frontend, nor can it access the backend.
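The `backend-policy.yaml` file applied in step 4 is part of the tutorial files rather than shown inline. A policy with the effect described above might look like the following sketch (the namespace and `role` labels are assumptions based on the stars demo, not the literal file contents):

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: backend-policy
  namespace: stars
spec:
  # Select the backend pods
  podSelector:
    matchLabels:
      role: backend
  ingress:
    # Allow only the frontend pods to reach the backend, on TCP 6379
    - from:
        - podSelector:
            matchLabels:
              role: frontend
      ports:
        - protocol: TCP
          port: 6379
```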
### 5) Expose the frontend service to the client namespace

```shell
kubectl create -f {{tutorialFilesURL}}/frontend-policy.yaml
```

The client can now access the frontend, but not the backend. Neither the frontend nor the backend can initiate connections to the client. The frontend can still access the backend.

To use {{prodname}} to enforce egress policy on Kubernetes pods, see [the advanced policy demo](kubernetes-policy-advanced.mdx).

### 6) (Optional) Clean up the demo environment

You can clean up the demo by deleting the demo Namespaces:

```bash
kubectl delete ns client stars management-ui
```

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx
deleted file mode 100644
index 06a4be6807..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-network-policy.mdx
+++ /dev/null
@@ -1,179 +0,0 @@
---
description: Learn Kubernetes policy syntax, rules, and features for controlling network traffic.
---

# Get started with Kubernetes network policy

## Big picture

Kubernetes network policy lets administrators and developers enforce which network traffic is allowed using rules.

## Value

Kubernetes network policy lets developers secure access to and from their applications using the same simple language they use to deploy them. Developers can focus on their applications without understanding low-level networking concepts. Enabling developers to easily secure their applications using network policies supports a shift-left DevOps environment.

## Concepts

The Kubernetes Network Policy API provides a standard way for users to define network policy for controlling network traffic. However, Kubernetes has no built-in capability to enforce the network policy. To enforce network policy, you must use a network plugin such as Calico.

### Ingress and egress

The bulk of securing network traffic typically revolves around defining egress and ingress rules. From the point of view of a Kubernetes pod, **ingress** is incoming traffic to the pod, and **egress** is outgoing traffic from the pod. In Kubernetes network policy, you create ingress and egress "allow" rules independently (egress, ingress, or both).

### Default deny/allow behavior

**Default allow** means all traffic is allowed by default, unless otherwise specified. **Default deny** means all traffic is denied by default, unless explicitly allowed.

## How to

Before you create your first Kubernetes network policy, you need to understand the default network policy behaviors. If no Kubernetes network policies apply to a pod, then all traffic to/from the pod is allowed (default-allow). As a result, if you do not create any network policies, then all pods are allowed to communicate freely with all other pods. If one or more Kubernetes network policies apply to a pod, then only the traffic specifically defined in those network policies is allowed (default-deny).

You are now ready to start fine-tuning traffic that should be allowed.

- [Create ingress policies](#create-ingress-policies)
- [Allow ingress traffic from pods in the same namespace](#allow-ingress-traffic-from-pods-in-the-same-namespace)
- [Allow ingress traffic from pods in a different namespace](#allow-ingress-traffic-from-pods-in-a-different-namespace)
- [Create egress policies](#create-egress-policies)
- [Allow egress traffic from pods in the same namespace](#allow-egress-traffic-from-pods-in-the-same-namespace)
- [Allow egress traffic to IP addresses or CIDR range](#allow-egress-traffic-to-ip-addresses-or-cidr-range)
- [Best practice: create deny-all default network policy](#best-practice-create-deny-all-default-network-policy)
- [Create deny-all default ingress and egress network policy](#create-deny-all-default-ingress-and-egress-network-policy)

### Create ingress policies

Create ingress network policies to allow inbound traffic from other pods.

Network policies apply to pods within a specific **namespace**. Policies can include one or more ingress rules. To specify which pods in the namespace the network policy applies to, use a **pod selector**. Within the ingress rule, use another pod selector to define which pods allow incoming traffic, and the **ports** field to define on which ports traffic is allowed.

#### Allow ingress traffic from pods in the same namespace

In the following example, incoming traffic to pods with label **color=blue** is allowed only if it comes from a pod with **color=red**, on port **80**.
```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-same-namespace
  namespace: default
spec:
  podSelector:
    matchLabels:
      color: blue
  ingress:
    - from:
        - podSelector:
            matchLabels:
              color: red
      ports:
        - port: 80
```

#### Allow ingress traffic from pods in a different namespace

To allow traffic from pods in a different namespace, use a namespace selector in the ingress policy rule. In the following policy, the namespace selector matches one or more Kubernetes namespaces and is combined with the pod selector that selects pods within those namespaces.

:::note

Namespace selectors can be used only in policy rules. The **spec.podSelector** applies to pods only in the same namespace as the policy.

:::

In the following example, incoming traffic is allowed only if it comes from a pod with label **color=red**, in a namespace with label **shape=square**, on port **80**.

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-different-namespace
  namespace: default
spec:
  podSelector:
    matchLabels:
      color: blue
  ingress:
    - from:
        - podSelector:
            matchLabels:
              color: red
          namespaceSelector:
            matchLabels:
              shape: square
      ports:
        - port: 80
```

### Create egress policies

Create egress network policies to allow outbound traffic from pods.

#### Allow egress traffic from pods in the same namespace

The following policy allows pod outbound traffic to other pods in the same namespace that match the pod selector. In the following example, outbound traffic is allowed only if it goes to a pod with label **color=red**, on port **80**.

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-egress-same-namespace
  namespace: default
spec:
  podSelector:
    matchLabels:
      color: blue
  egress:
    - to:
        - podSelector:
            matchLabels:
              color: red
      ports:
        - port: 80
```

#### Allow egress traffic to IP addresses or CIDR range

Egress policies can also be used to allow traffic to specific IP addresses and CIDR ranges. Typically, IP addresses/ranges are used to handle traffic that is external to the cluster, such as static resources or subnets.

The following policy allows egress traffic to the CIDR range **172.18.0.0/24**.

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-egress-external
  namespace: default
spec:
  podSelector:
    matchLabels:
      color: red
  egress:
    - to:
        - ipBlock:
            cidr: 172.18.0.0/24
```
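The `ipBlock` stanza also supports an `except` list for carving holes out of an allowed range. A minimal sketch (the CIDRs are illustrative):

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-egress-external-except
  namespace: default
spec:
  podSelector:
    matchLabels:
      color: red
  egress:
    - to:
        - ipBlock:
            cidr: 172.18.0.0/24
            # Exclude a single address from the allowed range (illustrative)
            except:
              - 172.18.0.1/32
```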
### Best practice: create deny-all default network policy

To ensure that all pods in the namespace are secure, a best practice is to create a default network policy. This avoids accidentally exposing an app or version that doesn't have policy defined.

#### Create deny-all default ingress and egress network policy

The following network policy implements a default **deny-all** ingress and egress policy, which prevents all traffic to/from pods in the **policy-demo** namespace. Note that the policy applies to all pods in the policy-demo namespace, but does not explicitly allow any traffic. All pods are selected, but because the default changes when pods are selected by a network policy, the result is **deny all ingress and egress traffic** (unless the traffic is allowed by another network policy).

```yaml
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: default-deny
  namespace: policy-demo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
    - Ingress
    - Egress
```

## Additional resources

- [Kubernetes Network Policy API documentation](https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/)

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx
deleted file mode 100644
index b0cf5131d9..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx
+++ /dev/null
@@ -1,342 +0,0 @@
---
description: Learn how to create more advanced Kubernetes network policies (namespace, allow and deny all ingress and egress).
---

# Kubernetes policy, advanced tutorial

The Kubernetes `NetworkPolicy` API lets users express ingress and egress policies for Kubernetes pods (starting with Kubernetes 1.8.0), based on labels and ports.

This guide walks through using Kubernetes `NetworkPolicy` to define more complex network policies.

## Requirements

- A working Kubernetes cluster and access to it using kubectl
- Your Kubernetes nodes have connectivity to the public internet
- You are familiar with [Kubernetes NetworkPolicy](kubernetes-policy-basic.mdx)

## Tutorial flow

1. Create the Namespace and Nginx Service
1. Deny all ingress traffic
1. Allow ingress traffic to Nginx
1. Deny all egress traffic
1. Allow egress traffic to kube-dns
1. Allow egress traffic to nginx
1. Clean up namespace

## 1. Create the namespace and nginx service

We'll use a new namespace for this guide. Run the following commands to create it and a plain nginx service listening on port 80.

```bash
kubectl create ns advanced-policy-demo
kubectl create deployment --namespace=advanced-policy-demo nginx --image=nginx
kubectl expose --namespace=advanced-policy-demo deployment nginx --port=80
```

### Verify access - allowed all ingress and egress

Open up a second shell session which has `kubectl` connectivity to the Kubernetes cluster and create a busybox pod to test policy access. This pod will be used throughout this tutorial to test policy access.

```bash
kubectl run --namespace=advanced-policy-demo access --rm -ti --image busybox /bin/sh
```

This should open up a shell session inside the `access` pod, as shown below.

```
Waiting for pod advanced-policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false

If you don't see a command prompt, try pressing enter.
/ #
```

Now from within the busybox "access" pod execute the following command to test access to the nginx service.

```bash
wget -q --timeout=5 nginx -O -
```

It should return the HTML of the nginx welcome page.

Still within the busybox "access" pod, issue the following command to test access to google.com.

```bash
wget -q --timeout=5 google.com -O -
```

It should return the HTML of the google.com home page.

## 2. Deny all ingress traffic

Enable ingress isolation on the namespace by deploying a [default deny all ingress traffic policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-ingress-traffic).

```bash
kubectl create -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: advanced-policy-demo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
    - Ingress
EOF
```

With ingress isolation in place, wget requests from the access pod to the nginx service time out.

## 3. Allow ingress traffic to Nginx

Create a policy that allows ingress traffic to the nginx pods from the other pods in the `advanced-policy-demo` namespace (a sketch of such a policy follows this section), then run the nginx wget from the access pod again. It should return:

```
Welcome to nginx!...
```

After creating the policy, we can now access the nginx Service.
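The allow policy for step 3 might look like the following sketch. It is an assumption modeled on the Kubernetes NetworkPolicy patterns used elsewhere in this guide, not the exact manifest from the original tutorial:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: access-nginx
  namespace: advanced-policy-demo
spec:
  # Select the nginx pods created by the deployment (label app=nginx)
  podSelector:
    matchLabels:
      app: nginx
  ingress:
    # Allow ingress from all pods in this namespace
    - from:
        - podSelector:
            matchLabels: {}
```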
## 4. Deny all egress traffic

Enable egress isolation on the namespace by deploying a [default deny all egress traffic policy](https://kubernetes.io/docs/concepts/services-networking/network-policies/#default-deny-all-egress-traffic).

```bash
kubectl create -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-egress
  namespace: advanced-policy-demo
spec:
  podSelector:
    matchLabels: {}
  policyTypes:
    - Egress
EOF
```

With egress isolation in place, wget requests from the access pod fail; even DNS lookups are blocked.

## 5. Allow egress traffic to kube-dns

Create a policy that allows egress traffic from all pods in the `advanced-policy-demo` namespace to kube-dns on port 53, so that pods can resolve names again.

## 6. Allow egress traffic to nginx

Next, create a policy that allows egress traffic from the access pod to the nginx pods, then run the nginx wget from the access pod again. It should return:

```
Welcome to nginx!...
```

Next, try to retrieve the home page of google.com.

```bash
wget -q --timeout=5 google.com -O -
```

It should return:

```
wget: download timed out
```

Access to `google.com` times out because it can resolve DNS but has no egress access to anything other than pods with labels matching `app: nginx` in the `advanced-policy-demo` namespace.

## 7. Clean up namespace

You can clean up after this tutorial by deleting the advanced policy demo namespace.

```bash
kubectl delete ns advanced-policy-demo
```

diff --git a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx b/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx
deleted file mode 100644
index 8210d8d19f..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx
+++ /dev/null
@@ -1,207 +0,0 @@
---
description: Learn how to use basic Kubernetes network policy to securely restrict traffic to/from pods.
---

# Kubernetes policy, basic tutorial

This guide provides a simple way to try out Kubernetes `NetworkPolicy` with {{prodname}}. It requires a Kubernetes cluster configured with {{prodname}} networking, and expects that you have `kubectl` configured to interact with the cluster.

You can quickly and easily deploy such a cluster by following one of the [installation guides](../../../getting-started/kubernetes/index.mdx).

## Configure namespaces

This guide will deploy pods in a Kubernetes namespace. Let's create the `Namespace` object for this guide.

```bash
kubectl create ns policy-demo
```

## Create demo pods

We'll use Kubernetes `Deployment` objects to easily create pods in the namespace.

1. Create some nginx pods in the `policy-demo` namespace.

   ```bash
   kubectl create deployment --namespace=policy-demo nginx --image=nginx
   ```

1. Expose them through a service.

   ```bash
   kubectl expose --namespace=policy-demo deployment nginx --port=80
   ```

1. Ensure the nginx service is accessible.

   ```bash
   kubectl run --namespace=policy-demo access --rm -ti --image busybox /bin/sh
   ```

   This should open up a shell session inside the `access` pod, as shown below.

   ```
   Waiting for pod policy-demo/access-472357175-y0m47 to be running, status is Pending, pod ready: false

   If you don't see a command prompt, try pressing enter.

   / #
   ```

1. From inside the `access` pod, attempt to reach the `nginx` service.

   ```bash
   wget -q nginx -O -
   ```

   You should see a response from `nginx`. Great! Our service is accessible. You can exit the pod now.

## Enable isolation

Let's turn on isolation in our `policy-demo` namespace. {{prodname}} will then prevent connections to pods in this namespace.

Running the following command creates a NetworkPolicy which implements a default deny behavior for all pods in the `policy-demo` namespace.
```bash
kubectl create -f - <<EOF
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: default-deny
  namespace: policy-demo
spec:
  podSelector:
    matchLabels: {}
EOF
```

diff --git a/calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx b/calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx
deleted file mode 100644
index 22cd391b74..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/hosts/kubernetes-nodes.mdx
+++ /dev/null
@@ -1,214 +0,0 @@
---
description: Protect Kubernetes nodes with host endpoints managed by Calico.
---

# Protect Kubernetes nodes

## Big picture

Secure Kubernetes nodes with host endpoints managed by {{prodname}}.

## Value

{{prodname}} can automatically create host endpoints for your Kubernetes nodes. This means {{prodname}} can manage the lifecycle of host endpoints as your cluster evolves, ensuring nodes are always protected by policy.

## Concepts

### Host endpoints

Each host has one or more network interfaces that it uses to communicate externally. You can represent these interfaces in Calico using host endpoints and then use network policy to secure them.

{{prodname}} host endpoints can have labels, and they work the same as labels on workload endpoints. The network policy rules can apply to both workload and host endpoints using label selectors.

Automatic host endpoints secure all of the host's interfaces (that is, in Linux, all the interfaces in the host network namespace). They are created by setting `interfaceName: "*"`.

### Automatic host endpoints

{{prodname}} creates a wildcard host endpoint for each node, with the host endpoint containing the same labels and IP addresses as its corresponding node. {{prodname}} ensures these managed host endpoints maintain the same labels and IP addresses as their nodes through periodic syncs. This means that policy targeting these automatic host endpoints continues to function correctly even if a node's IPs or labels change over time.

Automatic host endpoints are differentiated from other host endpoints by the label `projectcalico.org/created-by: calico-kube-controllers`. Enable or disable automatic host endpoints by configuring the default KubeControllersConfiguration resource.
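For reference, the relevant part of that resource looks roughly like this (a sketch showing only the fields discussed here, matching the patch command used below):

```yaml
apiVersion: projectcalico.org/v3
kind: KubeControllersConfiguration
metadata:
  name: default
spec:
  controllers:
    node:
      hostEndpoint:
        # Create and maintain a wildcard host endpoint for every node
        autoCreate: Enabled
```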
## Before you begin...

Have a running {{prodname}} cluster with `calicoctl` installed.

## How to

- [Enable automatic host endpoints](#enable-automatic-host-endpoints)
- [Apply network policy to automatic host endpoints](#apply-network-policy-to-automatic-host-endpoints)

### Enable automatic host endpoints

To enable automatic host endpoints, edit the default KubeControllersConfiguration instance, and set `spec.controllers.node.hostEndpoint.autoCreate` to `Enabled`:

```bash
calicoctl patch kubecontrollersconfiguration default --patch='{"spec": {"controllers": {"node": {"hostEndpoint": {"autoCreate": "Enabled"}}}}}'
```

If successful, host endpoints are created for each of your cluster's nodes:

```bash
calicoctl get heps -owide
```

The output may look similar to this:

```
NAME                                                    NODE                                           INTERFACE   IPS                              PROFILES
ip-172-16-101-147.us-west-2.compute.internal-auto-hep   ip-172-16-101-147.us-west-2.compute.internal   *           172.16.101.147,192.168.228.128   projectcalico-default-allow
ip-172-16-101-54.us-west-2.compute.internal-auto-hep    ip-172-16-101-54.us-west-2.compute.internal    *           172.16.101.54,192.168.107.128    projectcalico-default-allow
ip-172-16-101-79.us-west-2.compute.internal-auto-hep    ip-172-16-101-79.us-west-2.compute.internal    *           172.16.101.79,192.168.91.64      projectcalico-default-allow
ip-172-16-101-9.us-west-2.compute.internal-auto-hep     ip-172-16-101-9.us-west-2.compute.internal     *           172.16.101.9,192.168.71.192      projectcalico-default-allow
ip-172-16-102-63.us-west-2.compute.internal-auto-hep    ip-172-16-102-63.us-west-2.compute.internal    *           172.16.102.63,192.168.108.192    projectcalico-default-allow
```

### Apply network policy to automatic host endpoints

To apply policy that targets all Kubernetes nodes, first add a label to the nodes. The label will be synced to their automatic host endpoints.

For example, to add the label **kubernetes-host** to all nodes and their host endpoints:

```bash
kubectl label nodes --all kubernetes-host=
```

And an example policy snippet:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: all-nodes-policy
spec:
  selector: has(kubernetes-host)
  # <rest of policy>
```

To select a specific set of host endpoints (and their corresponding Kubernetes nodes), use a policy selector that selects a label unique to that set of host endpoints. For example, if we want to add the label **environment=dev** to nodes named node1 and node2:

```bash
kubectl label node node1 environment=dev
kubectl label node node2 environment=dev
```

With the labels in place and automatic host endpoints enabled, host endpoints for node1 and node2 are updated with the **environment=dev** label. We can write policy to select that set of nodes with a combination of selectors:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: some-nodes-policy
spec:
  selector: has(kubernetes-host) && environment == 'dev'
  # <rest of policy>
```

## Tutorial

This tutorial locks down Kubernetes node ingress to allow only SSH and the ports required for Kubernetes to function. We will apply two policies: one for the control plane nodes and one for the worker nodes.

:::note

This tutorial was tested on a cluster created with kubeadm v1.18.2 on AWS, using a "stacked etcd" [topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/). Stacked etcd topology means the etcd pods are running on the masters. kubeadm uses stacked etcd by default.
If your Kubernetes cluster is on a different platform, is running a variant of Kubernetes, or is running a topology with an external etcd cluster, please review the required ports for control plane and worker nodes in your cluster and adjust the policies in this tutorial as needed.

:::

First, let's restrict ingress traffic to the control plane nodes. The ingress policy below contains three rules. The first rule allows access to the API server port from anywhere. The second rule allows all traffic to localhost, which allows Kubernetes to access control plane processes. These control plane processes include the etcd server client API, the scheduler, and the controller-manager. This rule also allows localhost access to the kubelet API and calico/node health checks. And the final rule allows the etcd pods to peer with each other and allows the masters to access each other's kubelet API.

If you have not modified the failsafe ports, you should still have SSH access to the nodes after applying this policy. Now apply the ingress policy for the Kubernetes masters:

```
calicoctl apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: ingress-k8s-masters
spec:
  selector: has(node-role.kubernetes.io/master)
  # This rule allows ingress to the Kubernetes API server.
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports:
          # kube API server
          - 6443
    # This rule allows all traffic to localhost.
    - action: Allow
      destination:
        nets:
          - 127.0.0.0/8
    # This rule is required in multi-master clusters where etcd pods are colocated with the masters.
    # Allow the etcd pods on the masters to communicate with each other. 2380 is the etcd peer port.
    # This rule also allows the masters to access the kubelet API on other masters (including itself).
    - action: Allow
      protocol: TCP
      source:
        selector: has(node-role.kubernetes.io/master)
      destination:
        ports:
          - 2380
          - 10250
EOF
```

Note that the above policy selects the standard **node-role.kubernetes.io/master** label that kubeadm sets on control plane nodes.

Next, we need to apply policy to restrict ingress to the Kubernetes workers. Before adding the policy, we will add a label to all of our worker nodes, which then gets added to their automatic host endpoints. For this tutorial we will use **kubernetes-worker**. An example command to add the label to worker nodes:

```bash
kubectl get node -l '!node-role.kubernetes.io/master' -o custom-columns=NAME:.metadata.name | tail -n +2 | xargs -I{} kubectl label node {} kubernetes-worker=
```

The workers' ingress policy consists of two rules. The first rule allows all traffic to localhost. As with the masters, the worker nodes need to access their localhost kubelet API and calico/node health checks. The second rule allows the masters to access the workers' kubelet API. Now apply the policy:

```
calicoctl apply -f - << EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: ingress-k8s-workers
spec:
  selector: has(kubernetes-worker)
  # Allow all traffic to localhost.
  ingress:
    - action: Allow
      destination:
        nets:
          - 127.0.0.0/8
    # Allow only the masters access to the nodes' kubelet API.
    - action: Allow
      protocol: TCP
      source:
        selector: has(node-role.kubernetes.io/master)
      destination:
        ports:
          - 10250
EOF
```

## Additional resources

- [Protect hosts tutorial](protect-hosts-tutorial.mdx)
- [Apply policy to Kubernetes node ports](../services/kubernetes-node-ports.mdx)
- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
- [Host endpoints](../../reference/resources/hostendpoint.mdx)

diff --git a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx b/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx
deleted file mode 100644
index 518252af56..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts-tutorial.mdx
+++ /dev/null
@@ -1,192 +0,0 @@
---
description: Learn how to secure incoming traffic from outside the cluster using Calico host endpoints with network policy, including allowing controlled access to specific Kubernetes services.
---

# Protect hosts tutorial

Imagine that the administrator of a Kubernetes cluster wants to secure it as much as possible against incoming traffic from outside the cluster. But suppose that the cluster provides various useful services that are exposed as Kubernetes NodePorts, i.e., as well-known TCP port numbers that appear to be available on any node in the cluster. The administrator does want to expose some of those NodePorts to traffic from outside.

In this example we will use pre-DNAT policy applied to the external interfaces of each cluster node:

- to disallow incoming traffic from outside, in general
- but then to allow incoming traffic to particular NodePorts.

We use pre-DNAT policy for these purposes, instead of normal host endpoint policy, because:

1. We want the protection against general external traffic to apply regardless of where that traffic is destined for - for example, to a locally hosted pod, or to a pod on another node, or to a local server process running on the host itself. Pre-DNAT policy is enforced in all of those cases - as we want - whereas normal host endpoint policy is not enforced for traffic going to a local pod.

2. We want to write this policy in terms of the advertised NodePorts, not in terms of whatever internal port numbers those may be transformed to. kube-proxy on the ingress node will use a DNAT to change a NodePort number and IP address to those of one of the pods that backs the relevant Service. Our policy therefore needs to take effect _before_ that DNAT - and that means that it must be a pre-DNAT policy.

:::note

This tutorial is intended to be used with named host endpoints, i.e. host endpoints with `interfaceName` set to a specific interface name. This tutorial does not work, as-is, with host endpoints with `interfaceName: "*"`.

:::

Here is the pre-DNAT policy that we need to disallow incoming external traffic in general:

```bash
calicoctl apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: drop-other-ingress
spec:
  order: 20
  preDNAT: true
  applyOnForward: true
  ingress:
    - action: Deny
  selector: has(host-endpoint)
EOF
```

From here the tutorial follows the same pattern: create a host endpoint for each node's external interface, then add higher-precedence (lower `order`) pre-DNAT policies that allow traffic from within the cluster and traffic to the particular NodePorts that should be exposed (the `allow-nodeport` policy). If you want the NodePort exception to apply only to particular nodes, you can label the relevant host endpoints and then use `host-endpoint == ''` as the selector of the `allow-nodeport` policy, instead of `has(host-endpoint)`.
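A sketch of the kind of `allow-nodeport` pre-DNAT policy described above (the NodePort number is illustrative, and this is not the exact manifest from the original tutorial):

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-nodeport
spec:
  # Lower order than drop-other-ingress (20), so this policy is matched first
  order: 10
  preDNAT: true
  applyOnForward: true
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports:
          # The NodePort to expose (illustrative)
          - 30080
  selector: has(host-endpoint)
```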
diff --git a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx b/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx
deleted file mode 100644
index b4673b68fe..0000000000
--- a/calico_versioned_docs/version-3.25/network-policy/hosts/protect-hosts.mdx
+++ /dev/null
@@ -1,184 +0,0 @@
---
description: Calico network policy not only protects workloads, but also hosts. Create Calico network policies to restrict traffic to/from hosts.
---

# Protect hosts

## Big picture

Use {{prodname}} network policy to restrict traffic to/from hosts.

## Value

Restricting traffic between hosts and the outside world is not unique to {{prodname}}; many solutions provide this capability. However, the advantage of using {{prodname}} to protect the host is that you can use the same {{prodname}} policy configuration as for workloads. You only need to learn one tool. Write a cluster-wide policy, and it is immediately applied to every host.

## Concepts

### Hosts and workloads

In the context of {{prodname}} configuration, a **workload** is a virtualized compute instance, like a VM or container. A **host** is the computer that runs the hypervisor (for VMs), or container runtime (for containers). We say it "hosts" the workloads as guests.

### Host endpoints

Each host has one or more network interfaces that it uses to communicate externally. You can use {{prodname}} network policy to secure these interfaces (called host endpoints). {{prodname}} host endpoints can have labels, and they work the same as labels on workload endpoints. The network policy rules can apply to both workload and host endpoints using label selectors.

### Failsafe rules

It is easy to inadvertently cut all host connectivity because of nonexistent or misconfigured network policy. To avoid this, {{prodname}} provides failsafe rules with default/configurable ports that are open on all host endpoints.

### Default behavior of workload to host traffic

By default, {{prodname}} blocks all connections from a workload to its local host. You can control whether connections from a workload endpoint to its local host are dropped, returned, or accepted using a simple parameter.

{{prodname}} allows all connections from processes running on the host to guest workloads on the host. This allows host processes to run health checks and debug guest workloads.

### Default behavior of external traffic to/from host

If a host endpoint is added and network policy is not in place, the {{prodname}} default is to deny traffic to/from that endpoint (except for traffic allowed by failsafe rules). For host endpoints, {{prodname}} blocks traffic only to/from interfaces that it's been explicitly told about in network policy. Traffic to/from other interfaces is ignored.

### Other host protection

Given {{prodname}}'s consistent design, you may wonder about the following use cases.

**Does {{prodname}} protect a local host from workloads?**
    -Yes. DefaultEndpointToHostAction controls whether or not workloads can access their local host.
    - -**Does {{prodname}} protect a workload from the host it is running on?**
    -No. {{prodname}} allows connections the host makes to the workloads running on that host. Some orchestrators like Kubernetes depend on this connectivity for health checking the workload. Moreover, processes running on the local host are often privileged enough to override local {{prodname}} policy. Be very cautious with the processes that you allow to run in the host's root network namespace. - -## Before you begin... - -If you are already running {{prodname}} for Kubernetes, you are good to go. If you want to install {{prodname}} on a non-cluster machine for host protection only, see [Non-cluster hosts](../../getting-started/bare-metal/index.mdx). - -## How to - -- [Avoid accidentally cutting all host connectivity ](#avoid-accidentally-cutting-all-host-connectivity) -- [Use policy to restrict host traffic](#use-policy-to-restrict-host-traffic) -- [Control default behavior of workload endpoint to host traffic](#control-default-behavior-of-workload-endpoint-to-host-traffic) - -### Avoid accidentally cutting all host connectivity - -To avoid inadvertently cutting all host connectivity because of nonexistent or misconfigured network policy, {{prodname}} uses failsafe rules that open specific ports and CIDRs on all host endpoints. - -Review the following table to determine if the defaults work for your implementation. If not, change the default ports using the parameters, **FailsafeInboundHostPorts** and **FailsafeOutboundHostPorts** in [Configuring Felix](../../reference/felix/configuration.mdx#environment-variables). - -| Port | Protocol | CIDR | Direction | Purpose | -| ---- | -------- | --------- | ------------------ | ------------------------------------ | -| 22 | TCP | 0.0.0.0/0 | Inbound | SSH access | -| 53 | UDP | 0.0.0.0/0 | Outbound | DNS queries | -| 67 | UDP | 0.0.0.0/0 | Outbound | DHCP access | -| 68 | UDP | 0.0.0.0/0 | Inbound | DHCP access | -| 179 | TCP | 0.0.0.0/0 | Inbound & Outbound | BGP access ({{prodname}} networking) | -| 2379 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd access | -| 2380 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd access | -| 6443 | TCP | 0.0.0.0/0 | Inbound & Outbound | Kubernetes API server access | -| 6666 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd self-hosted service access | -| 6667 | TCP | 0.0.0.0/0 | Inbound & Outbound | etcd self-hosted service access | - -### Use policy to restrict host traffic - -#### Step 1: Create policy to restrict host traffic - -Although failsafe rules provide protection from removing all connectivity to a host, you should create a GlobalNetworkPolicy policy that restricts host traffic. - -In the following example, we use a **GlobalNetworkPolicy** that applies to all worker nodes (defined by a label). Ingress SSH access is allowed from a defined "management" subnet. - -**Ingress traffic** is also allowed for ICMP, and on TCP port 10250 (default kubelet port). **Egress** traffic is allowed to etcd on a particular IP, and UDP on port 53 and 67 for DNS and DHCP. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: k8s-worker -spec: - selector: "role == 'k8s-worker'" - order: 0 - ingress: - - action: Allow - protocol: TCP - source: - nets: - - '<your-management-CIDR>' - destination: - ports: [22] - - action: Allow - protocol: ICMP - - action: Allow - protocol: TCP - destination: - ports: [10250] - egress: - - action: Allow - protocol: TCP - destination: - nets: - - '<your-etcd-IP>/32' - ports: [2379] - - action: Allow - protocol: UDP - destination: - ports: [53, 67] -``` - -#### Step 2: Create host endpoints - -For each host endpoint that you want to secure with policy, you must create a **HostEndpoint** object. To do that, you need the name of the {{prodname}} node on the host that owns the interface; in most cases, it is the same as the hostname of the host. - -In the following example, we create a HostEndpoint for the host named **my-host** with the interface named **eth0**, with **IP 10.0.0.1**. Note that the value for **node:** must match the hostname used on the {{prodname}} node object. - -When the HostEndpoint is created, traffic to or from the interface is dropped unless policy is in place. - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: my-host-eth0 - labels: - role: k8s-worker - environment: production -spec: - interfaceName: eth0 - node: my-host - expectedIPs: ['10.0.0.1'] -``` - -### Control default behavior of workload endpoint to host traffic - -The default {{prodname}} behavior blocks all connections from workloads to their local host (after traffic passes any egress policy applied to the workload). You can change this behavior using the **DefaultEndpointToHostAction** parameter in Felix configuration. - -This parameter works at the iptables level, where you can specify packet behavior to **Drop** (default), **Accept**, or **Return**. - -To change this parameter for all hosts, edit the **FelixConfiguration** object named “default.” - -1. Get a copy of the object to edit. - - ```bash - calicoctl get felixconfiguration default --export -o yaml > default-felix-config.yaml - ``` - -1. Open the file in a text editor and add the parameter, **defaultEndpointToHostAction**. For example: - - ```yaml - apiVersion: projectcalico.org/v3 - kind: FelixConfiguration - metadata: - name: default - spec: - ipipEnabled: true - logSeverityScreen: Info - reportingInterval: 0s - defaultEndpointToHostAction: Accept - ``` - -1. Update the FelixConfiguration on the cluster. - ```bash - calicoctl apply -f default-felix-config.yaml - ``` - -## Additional resources - -- [Apply policy to Kubernetes node ports](../services/kubernetes-node-ports.mdx) -- [Protect Kubernetes nodes with host endpoints managed by {{prodname}}](kubernetes-nodes.mdx) -- [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Host endpoint](../../reference/resources/hostendpoint.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/index.mdx b/calico_versioned_docs/version-3.25/network-policy/index.mdx deleted file mode 100644 index c4222c19d8..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Calico Network Policy and Calico Global Network Policy are the fundamental resources to secure workloads and hosts, and to adopt a zero trust security model. 
-hide_table_of_contents: true ---- - -# Security - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - -<DocCardList items={useCurrentSidebarCategory().items} /> diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx deleted file mode 100644 index 2419bfbab8..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/istio/app-layer-policy.mdx +++ /dev/null @@ -1,200 +0,0 @@ ---- -description: Enforce network policy for Istio service mesh including matching on HTTP methods and paths. ---- -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -# Enforce network policy for Istio - -## Big picture - -{{prodname}} integrates seamlessly with Istio to enforce network policy within the Istio service mesh. - -## Value - -{{prodname}} network policy for Istio lets you enforce application layer attributes like HTTP methods or paths, and cryptographically secure identities for Istio-enabled apps. - -## Concepts - -### Benefits of the Istio integration - -The {{prodname}} support for Istio service mesh has the following benefits: - -- **Pod traffic controls** - - Lets you restrict ingress traffic inside and outside pods and mitigate common threats to Istio-enabled apps. - -- **Supports security goals** - - Enables adoption of a zero trust network model for security, including traffic encryption, multiple enforcement points, and multiple identity criteria for authentication. - -- **Familiar policy language** - - Kubernetes network policies and {{prodname}} network policies work as is; users do not need to learn another network policy model to adopt Istio. - -See [Enforce network policy using Istio tutorial](enforce-policy-istio.mdx) to learn how application layer policy provides second-factor authentication for the mythical Yao Bank. - -## Before you begin - -**Required** - -- [{{prodname}} is installed](../../getting-started/kubernetes/index.mdx) -- [calicoctl is installed and configured](../../operations/calicoctl/install.mdx) - -**Istio support** - -The following Istio versions have been verified to work with application layer policies: - -- Istio v1.15.2 -- Istio v1.10.2 - -Istio v1.9.x and lower are **not** supported. - -Although we expect future minor versions to work with the corresponding manifest below (for example, v1.10.2 or v1.15.2), manifest compatibility depends entirely on the upstream changes in the respective Istio release. - -## How to - -1. [Enable application layer policy](#enable-application-layer-policy) -2. [Install Calico CSI Driver](#install-calico-csi-driver) -3. [Install Istio](#install-istio) -4. [Update Istio sidecar injector](#update-istio-sidecar-injector) -5. [Add Calico authorization services to the mesh](#add-calico-authorization-services-to-the-mesh) -6. [Add namespace labels](#add-namespace-labels) - -### Enable application layer policy - -To enable the application layer policy, you must enable the **Policy Sync API** on Felix cluster-wide. 
- -In the default **FelixConfiguration**, set the field `policySyncPathPrefix` to `/var/run/nodeagent`: - - - - -```bash -calicoctl patch FelixConfiguration default --patch \ - '{"spec": {"policySyncPathPrefix": "/var/run/nodeagent"}}' -``` - - - - -```bash -kubectl patch FelixConfiguration default --type=merge --patch \ - '{"spec": {"policySyncPathPrefix": "/var/run/nodeagent"}}' -``` - - - - -Additionally, if you have installed Calico via the operator, you can optionally disable flexvolumes. -Flexvolumes were used in earlier implementations and have since been deprecated. - -```bash -kubectl patch installation default --type=merge -p '{"spec": {"flexVolumePath": "None"}}' -``` - -### Install Calico CSI Driver - -{{prodname}} utilizes a Container Storage Interface (CSI) driver to help set up the policy sync API on every node. -Apply the following to install the Calico CSI driver: - -```bash -kubectl apply -f {{manifestsUrl}}/manifests/csi-driver.yaml -``` - -### Install Istio - -1. Verify [application layer policy requirements](../../getting-started/kubernetes/requirements.mdx#application-layer-policy-requirements). -2. Install Istio using the [installation guide in the project documentation](https://istio.io/v1.15/docs/setup/install/). - -```bash -curl -L https://git.io/getLatestIstio | ISTIO_VERSION=1.15.2 sh - -cd $(ls -d istio-* --color=never) -./bin/istioctl install -``` - -Next, create the following [PeerAuthentication](https://istio.io/v1.15/docs/reference/config/security/peer_authentication/) policy. - -Replace the `namespace` below with the `rootNamespace` value, if it's customized in your environment. - -```bash -kubectl create -f - < -``` - -### Update Istio sidecar injector - - -```bash -curl {{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.15.yaml -o istio-inject-configmap.yaml -kubectl patch configmap -n istio-system istio-sidecar-injector --patch "$(cat istio-inject-configmap.yaml)" -``` - -[View sample manifest]({{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.15.yaml) - - - - -```bash -curl {{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.10.yaml -o istio-inject-configmap.yaml -kubectl patch configmap -n istio-system istio-sidecar-injector --patch "$(cat istio-inject-configmap.yaml)" -``` - -[View sample manifest]({{manifestsUrl}}/manifests/alp/istio-inject-configmap-1.10.yaml) - - - - -### Add Calico authorization services to the mesh - -Apply the following manifest to configure Istio to query {{prodname}} for application layer policy authorization decisions. - -This applies to Istio v1.15.x and v1.10.x: - -```bash -kubectl apply -f {{manifestsUrl}}/manifests/alp/istio-app-layer-policy-envoy-v3.yaml -``` - -[View sample manifest]({{manifestsUrl}}/manifests/alp/istio-app-layer-policy-envoy-v3.yaml) - -### Add namespace labels - -You can control enforcement of application layer policy on a per-namespace basis. However, this only works on pods that are started with the Envoy and {{prodname}} Dikastes sidecars (as noted in the step, Update Istio sidecar injector). Pods that do not have the {{prodname}} sidecars enforce only standard {{prodname}} network policy. - -To enable Istio and application layer policy in a namespace, add the label `istio-injection=enabled`. - -```bash -kubectl label namespace <your-namespace> istio-injection=enabled -``` - -If the namespace already has pods in it, you must recreate them for this to take effect. - -:::note - -Envoy must be able to communicate with the `istio-pilot.istio-system` service. If you apply any egress policies to your pods, you _must_ enable access. 
- -```bash -kubectl apply -f {{tutorialFilesURL}}/allow-istio-pilot.yaml -``` - -::: - -## Additional resources - -- [Enforce network policy using Istio tutorial](enforce-policy-istio.mdx) -- [Use HTTP methods and paths in policy rules](http-methods.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx deleted file mode 100644 index eb52f8465c..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/istio/enforce-policy-istio.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -description: Learn how Calico integrates with Istio to provide fine-grained access control using Calico network policies enforced within the service mesh and network layer. ---- - -# Enforce Calico network policy using Istio (tutorial) - -This tutorial sets up a microservices application, then demonstrates how to use {{prodname}} application layer policy to mitigate some common threats. - -:::note - -This tutorial was verified using Istio v1.10.2. Some content may not apply to the latest Istio version. - -::: - -## Prerequisites - -1. Build a Kubernetes cluster. -2. Install {{prodname}} on Kubernetes: - -- If {{prodname}} is not installed on Kubernetes, see [Calico on Kubernetes](../../getting-started/kubernetes/quickstart.mdx). -- If {{prodname}} is already installed on Kubernetes, verify that [Calico networking](../../networking/index.mdx) (or a non-Calico CNI) and {{prodname}} network policy are installed. - -3. Install the [calicoctl command line tool](../../operations/calicoctl/install.mdx). - **Note**: Ensure calicoctl is configured to connect with your datastore. -4. [Enable application layer policy](app-layer-policy.mdx). - **Note**: Label the default namespace for the Istio sidecar injection (`istio-injection=enabled`). - `kubectl label namespace default istio-injection=enabled` - -### Install the demo application - -We will use a simple microservice application to demonstrate {{prodname}} -application layer policy. The [YAO Bank](https://github.com/projectcalico/yaobank) application creates a -customer-facing web application, a microservice that serves up account -summaries, and an [etcd](https://github.com/coreos/etcd) datastore. - -```bash -kubectl apply -f {{tutorialFilesURL}}/10-yaobank.yaml -``` - -:::note - -You can also -[view the manifest in your browser](/files/10-yaobank.yaml). - -::: - -Verify that the application pods have been created and are ready. - -```bash -kubectl get pods -``` - -When the demo application has come up, you will see three pods. - -``` -NAME READY STATUS RESTARTS AGE -customer-2809159614-qqfnx 3/3 Running 0 21h -database-1601951801-m4w70 3/3 Running 0 21h -summary-2817688950-g1b3n 3/3 Running 0 21h -``` - -### Determining ingress IP and port - -You will use the `istio-ingressgateway` service to access the YAO Bank -application. Determine your ingress host and port [following the Istio instructions](https://istio.io/docs/tasks/traffic-management/ingress/ingress-control/#determining-the-ingress-ip-and-ports). Once you have the `INGRESS_HOST` and `INGRESS_PORT` variables set, you can -set the `GATEWAY_URL` as follows. - -```bash -export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT -``` - -Point your browser to `http://$GATEWAY_URL/` to confirm the YAO Bank application is functioning -correctly. It may take several minutes for all the services to come up and respond, during which -time you may see 404 or 500 errors. 
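For reference, here is a minimal sketch of how these variables are often populated, assuming the default `istio-ingressgateway` service in the `istio-system` namespace from the Istio instructions linked above, and a load balancer that reports an external IP (the service name, namespace, and `http2` port name are assumptions; adjust for your environment):

```bash
# Hedged sketch: assumes the default istio-ingressgateway service in istio-system
# and an external load balancer that exposes an IP address.
export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway \
  -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT
```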
- -### The need for policy - -Although {{prodname}} & Istio are running in the cluster, we have not defined any authentication -policy. Istio was configured to mutually authenticate traffic between the pods in your application, -so only connections with Istio-issued certificates are allowed, and all inter-pod traffic is encrypted with TLS. That's already a big step in the right direction. - -But, let's consider some deficiencies in this security architecture: - -- All incoming connections from workloads in the Istio mesh are equally trusted -- Possession of a key & certificate pair is the _only_ access credential considered. - -To understand why these might be a problem, let's take them one at a time. - -#### Trusting workloads - -Trusting connections from any workload in the Istio mesh is a poor security architecture because, -like Kubernetes, Istio is designed to host multiple applications. Some of those applications may -not be as trusted as others. They may be operated by different users or teams with wildly different -security requirements. We don't want our secure financial application microservices accessible from -some hacky prototype another developer is cooking up. - -Even within our own application, the best practice is to limit access as much -as possible. Only pods that need access to a service should get it. Consider -the YAO Bank application. The customer web service does not need, and should -not have direct access to the backend database. The customer web service needs -to directly interact with clients outside the cluster, some of whom may be -malicious. Unfortunately, vulnerabilities in web applications are all too -common. For example, an [unpatched vulnerability in Apache Struts](https://nvd.nist.gov/vuln/detail/CVE-2017-5638) - is what allowed -attackers their initial access into the Equifax network where they then -launched a devastating attack to steal millions of people's financial -information. - -Imagine what would happen if an attacker were to gain control of the customer web pod in our -application. Let's simulate this by executing a remote shell inside that pod. - -```bash -kubectl exec -ti customer- -c customer -- bash -``` - -Notice that from here, we get direct access to the backend database. For example, we can list all the entries in the database like this: - -```bash -curl http://database:2379/v2/keys?recursive=true | python -m json.tool -``` - -(Piping to `python -m json.tool` nicely formats the output.) - -#### Single-factor authentication - -The possession of a key and certificate pair is a very strong assertion that a -connection is authentic because it is based on cryptographic proofs that are -believed to be nearly impossible to forge. When we authenticate connections -this way we can say with extremely high confidence that the party on the other -end is in possession of the corresponding key. However, this is only a proxy -for what we actually want to be confident of: that the party on the other end -really is the authorized workload we want to communicate with. Keeping the -private key a secret is vital to this confidence, and occasionally attackers -can find ways to trick applications into giving up secrets they should not. -For example, the [Heartbleed](https://owasp.org/www-community/vulnerabilities/Heartbleed_Bug) vulnerability in OpenSSL allowed attackers to -trick an affected application into reading out portions of its memory, -compromising private keys. 
- -#### Network policy - -We can mitigate both of the above deficiencies with a {{prodname}} policy. - - wget {{tutorialFilesURL}}/30-policy.yaml - calicoctl create -f 30-policy.yaml - -:::note - -You can also -[view the manifest in your browser](/files/30-policy.yaml). - -::: - -Let's examine this policy piece by piece. It consists of three policy objects, one for each -microservice. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: customer -spec: - selector: app == 'customer' - ingress: - - action: Allow - http: - methods: ['GET'] - egress: - - action: Allow -``` - -This policy protects the customer web app. Since this application is customer facing, we do not -restrict what can communicate with it. We do, however, restrict its communications to HTTP `GET` -requests. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: summary -spec: - selector: app == 'summary' - ingress: - - action: Allow - source: - serviceAccounts: - names: ['customer'] - egress: - - action: Allow -``` - -The second policy protects the account summary microservice. We know the only consumer of this -service is the customer web app, so we restrict the source of incoming connections to the service -account for the customer web app. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: database -spec: - selector: app == 'database' - ingress: - - action: Allow - source: - serviceAccounts: - names: ["summary"] - egress: - - action: Allow -``` - -The third policy protects the database. Only the summary microservice should have direct access to -the database. - -Let's verify our policy is working as intended. First, return to your browser and refresh to -ensure policy enforcement has not broken the application. - -Next, return to the customer web app. Recall that we simulated an attacker gaining control of that -pod by executing a remote shell inside it. - -```bash -kubectl exec -ti customer- -c customer bash -``` - -Repeat our attempt to access the database. - -```bash -curl -I http://database:2379/v2/keys?recursive=true -``` - -We have left out the JSON formatting because we do not expect to get a valid JSON response. This -time we should get a `403 Forbidden` response. Only the account summary microservice has database -access according to our policy. diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx deleted file mode 100644 index 4d9b51bf66..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/istio/http-methods.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: Create a Calico network policy for Istio-enabled apps to restrict ingress traffic matching HTTP methods or paths. ---- - -# Use HTTP methods and paths in policy rules - -## Big picture - -Use Calico network policy for Istio-enabled apps to restrict ingress traffic that matches HTTP methods or paths. - -## Value - -Istio is ideal for applying policy for operational goals and for security that operates at the application layer. However, for security goals inside and outside the cluster, Calico network policy is required. Using special Calico network policy designed for Istio-enabled apps, you can restrict ingress traffic inside and outside pods using HTTP methods (for example, GET requests). 
- -## Concepts - -### HTTP match criteria: ingress traffic only - -Calico network policy supports restricting traffic based on HTTP methods and paths only for ingress traffic. - -## Before you begin... - -[Enable application layer policy](app-layer-policy.mdx) - -## How to - -**Restrict ingress traffic using HTTP match criteria** - -In the following example, the trading app is allowed ingress traffic only for HTTP GET requests that match the exact path **/projects/calico**, or that begin with the prefix **/users**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: customer -spec: - selector: app == 'tradingapp' - ingress: - - action: Allow - http: - methods: ['GET'] - paths: - - exact: '/projects/calico' - - prefix: '/users' - egress: - - action: Allow -``` - diff --git a/calico_versioned_docs/version-3.25/network-policy/istio/index.mdx b/calico_versioned_docs/version-3.25/network-policy/istio/index.mdx deleted file mode 100644 index 26453af30c..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/istio/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure the Calico "application layer policy" with application layer-specific attributes for Istio service mesh. -hide_table_of_contents: true ---- - -# Policy for Istio - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - -<DocCardList items={useCurrentSidebarCategory().items} /> diff --git a/calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx b/calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx deleted file mode 100644 index bd3c30488b..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/non-privileged.mdx +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Run long-lived Calico components without root or system admin privileges. ---- - -# Run Calico node as non-privileged and non-root - -## Big picture - -Run long-lived {{prodname}} components in non-privileged and non-root containers. - -## Value - -Running {{prodname}} in non-privileged and non-root mode is an option for users who -want to secure {{prodname}} as much as possible, and who do not care about -{{prodname}} features beyond the basic {{prodname}} networking and network policy. -The tradeoff for more security is the overhead of {{prodname}} networking management. -For example, you no longer receive {{prodname}} corrections to misconfigurations caused -by other components within your cluster, and support for new features is limited. - -## Concepts - -To run {{prodname}} as securely as possible, long-running {{prodname}} components -(for example, calico/node) can be run without privileged and root permissions in their respective -containers. Note that to set up these components, the init containers still need to run with -privileged and root permissions, but the risk to cluster security is minimal because of the -ephemeral nature of init containers. - -## Supported - -- Operator installation only. - -## Unsupported - -- {{prodname}} Enterprise -- eBPF dataplane -- WorkloadSourceSpoofing felix option and the related `cni.projectcalico.org/allowedSourcePrefixes` annotation - -:::note - -Support for features added after Calico v3.21 is not guaranteed. - -::: - -## How to - -1. Follow the Tigera {{prodname}} operator [installation instructions](../getting-started/kubernetes/quickstart.mdx). - If you have already installed the operator, skip to the next step. - -1. Edit the {{prodname}} installation to set the `nonPrivileged` field to `Enabled`. 
- - ``` - kubectl edit installation default - ``` - - Your installation resource should look similar to the following: - - ```yaml - apiVersion: operator.tigera.io/v1 - kind: Installation - metadata: - name: default - spec: - calicoNetwork: - bgp: Enabled - hostPorts: Enabled - ipPools: - - blockSize: 26 - cidr: 192.168.0.0/16 - encapsulation: VXLANCrossSubnet - natOutgoing: Enabled - nodeSelector: all() - linuxDataplane: Iptables - multiInterfaceMode: None - nodeAddressAutodetectionV4: - firstFound: true - cni: - ipam: - type: Calico - type: Calico - controlPlaneReplicas: 2 - flexVolumePath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/ - nodeUpdateStrategy: - rollingUpdate: - maxUnavailable: 1 - type: RollingUpdate - nonPrivileged: Enabled - variant: Calico - ``` - -1. The `calico-node` pods in the `calico-system` namespace should now restart. Verify that they restart properly. - ``` - watch kubectl get pods -n calico-system - ``` - -{{prodname}} should now be running `calico-node` in non-privileged and non-root containers. diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx deleted file mode 100644 index fc5ceeefae..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/external-ips-policy.mdx +++ /dev/null @@ -1,112 +0,0 @@ ---- -description: Limit egress and ingress traffic using IP address either directly within Calico network policy or managed as Calico network sets. ---- - -# Use external IPs or networks rules in policy - -## Big picture - -Use {{prodname}} network policy to limit traffic to/from external non-{{prodname}} workloads or networks. - -## Value - -Modern applications often integrate with third-party APIs and SaaS services that live outside Kubernetes clusters. To securely enable access to those integrations, network security teams must be able to limit IP ranges for egress and ingress traffic to workloads. This includes using IP lists or ranges to deny-list bad actors or embargoed countries. - -Using {{prodname}} network policy, you can define IP addresses/CIDRs directly in policy to limit traffic to external networks. Or using {{prodname}} network sets, you can easily scale out by using the same set of IPs in multiple policies. - -## Concepts - -### IP addresses/CIDRs - -IP addresses and CIDRs can be specified directly in both Kubernetes and {{prodname}} network policy rules. {{prodname}} network policy supports IPV4 and IPV6 CIDRs. - -### Network sets - -A **network set** resource is an arbitrary set of IP subnetworks/CIDRs that can be matched by standard label selectors in Kubernetes or {{prodname}} network policy. This is useful to reference a set of IP addresses using a selector from a namespaced network policy resource. It is typically used when you want to scale/reuse the same set of IP addresses in policy. - -A **global network set** resource is similar, but can be selected only by {{prodname}} global network policies. 
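For illustration, a namespaced **network set** might look like the following sketch; the name, namespace, label, and CIDRs are hypothetical, and a NetworkPolicy rule in the same namespace could then match it with a selector such as `role == 'external-apis'`:

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkSet
metadata:
  # Hypothetical names and labels, for illustration only.
  name: external-apis
  namespace: production
  labels:
    role: external-apis
spec:
  nets:
    # Example (documentation-range) CIDRs for the external services to match.
    - 198.51.100.0/28
    - 203.0.113.64/26
```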
- -## How to - -- [Limit traffic to or from external networks, IPs in network policy](#limit-traffic-to-or-from-external-networks-ips-in-network-policy) -- [Limit traffic to or from external networks, global network set](#limit-traffic-to-or-from-external-networks-global-network-set) - -### Limit traffic to or from external networks, IPs in network policy - -In the following example, a {{prodname}} NetworkPolicy allows egress traffic from pods with the label **color: red**, if it goes to an IP address in the 192.0.2.0/24 CIDR block. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-egress-external - namespace: production -spec: - selector: color == 'red' - types: - - Egress - egress: - - action: Allow - destination: - nets: - - 192.0.2.0/24 -``` - -### Limit traffic to or from external networks, global network set - -In this example, we use a {{prodname}} **GlobalNetworkSet** and reference it in a **GlobalNetworkPolicy**. - -In the following example, a {{prodname}} **GlobalNetworkSet** deny-lists the CIDR ranges 192.0.2.55/32 and 203.0.113.0/24: - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkSet -metadata: - name: ip-protect - labels: - ip-deny-list: 'true' -spec: - nets: - - 192.0.2.55/32 - - 203.0.113.0/24 -``` - -Next, we create two {{prodname}} **GlobalNetworkPolicy** objects. The first is a high “order” policy that allows traffic as a default for things that don’t match our second policy, which is low “order” and uses the **GlobalNetworkSet** label as a selector to deny ingress traffic (IP-deny-list in the previous step). In the label selector, we also include the term **!has(projectcalico.org/namespace)**, which prevents this policy from matching pods or NetworkSets that also have this label. To more quickly enforce the denial of forwarded traffic to the host at the packet level, use the **doNotTrack** and **applyOnForward** options. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: forward-default-allow -spec: - selector: apply-ip-protect == 'true' - order: 1000 - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Allow ---- -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: ip-protect -spec: - selector: apply-ip-protect == 'true' - order: 0 - doNotTrack: true - applyOnForward: true - types: - - Ingress - ingress: - - action: Deny - source: - selector: ip-deny-list == 'true' && !has(projectcalico.org/namespace) -``` - -## Additional resources - -- To understand how to use global network sets to mitigate common threats, see [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx) -- [Global network sets](../../reference/resources/globalnetworkset.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx deleted file mode 100644 index 22a18b75fa..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/icmp-ping.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -description: Control where ICMP/ping is used by creating a Calico network policy to allow and deny ICMP/ping messages for workloads and host endpoints. ---- - -# Use ICMP/ping rules in policy - -## Big picture - -Use {{prodname}} network policy to allow and deny ICMP/ping messages. 
- -## Value - -The **Internet Control Message Protocol (ICMP)** provides valuable network diagnostic functions, but it can also be used maliciously. Attackers can use -it to learn about your network, or for DoS attacks. Using {{prodname}} network policy, you can control where ICMP is used. For example, you can: - -- Allow ICMP ping, but only for workloads, host endpoints (or both) -- Allow ICMP for pods launched by operators for diagnostic purposes, but block other uses -- Temporarily enable ICMP to diagnose a problem, then disable it after the problem is resolved -- Deny/allow ICMPv4 and/or ICMPv6 - -## Concepts - -### ICMP packet type and code - -{{prodname}} network policy also lets you deny and allow ICMP traffic based on specific types and codes. For example, you can specify ICMP type 5, code 2 to match specific ICMP redirect packets. - -For details, see [ICMP type and code](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages). - -## How to - -- [Deny all ICMP, all workloads and host endpoints](#deny-all-icmp-all-workloads-and-host-endpoints) -- [Allow ICMP ping, all workloads and host endpoints](#allow-icmp-ping-all-workloads-and-host-endpoints) -- [Allow ICMP matching protocol type and code, all Kubernetes pods](#allow-icmp-matching-protocol-type-and-code-all-Kubernetes-pods) - -### Deny all ICMP, all workloads and host endpoints - -In this example, we introduce a "deny all ICMP" **GlobalNetworkPolicy**. - -This policy **selects all workloads and host endpoints**. It enables a default deny for all workloads and host endpoints, in addition to the explicit ICMP deny rules specified in the policy. - -If your ultimate goal is to allow some traffic, have your regular "allow" policies in place before applying a global deny-all ICMP traffic policy. - -In this example, all workloads and host endpoints are blocked from sending or receiving **ICMPv4** and **ICMPv6** messages. - -If **ICMPv6** messages are not used in your deployment, it is still good practice to deny them specifically as shown below. - -In any "deny-all" {{prodname}} network policy, be sure to give it a higher order value (**order: 200**) than regular policies that might allow traffic; policies with lower order values are evaluated first. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: block-icmp -spec: - order: 200 - selector: all() - types: - - Ingress - - Egress - ingress: - - action: Deny - protocol: ICMP - - action: Deny - protocol: ICMPv6 - egress: - - action: Deny - protocol: ICMP - - action: Deny - protocol: ICMPv6 -``` - -### Allow ICMP ping, all workloads and host endpoints - -In this example, workloads and host endpoints can receive **ICMPv4 type 8** and **ICMPv6 type 128** ping requests that come from other workloads and host endpoints. - -All other traffic may be allowed by other policies. If traffic is not explicitly allowed, it will be denied by default. - -The policy applies only to **ingress** traffic. (Egress traffic is not affected, and default deny is not enforced for egress.) 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-ping-in-cluster -spec: - selector: all() - types: - - Ingress - ingress: - - action: Allow - protocol: ICMP - source: - selector: all() - icmp: - type: 8 # Ping request - - action: Allow - protocol: ICMPv6 - source: - selector: all() - icmp: - type: 128 # Ping request -``` - -### Allow ICMP matching protocol type and code, all Kubernetes pods - -In this example, only Kubernetes pods that match the selector **projectcalico.org/orchestrator == 'kubernetes'** are allowed to receive ICMPv4 **code: 1 host unreachable** messages. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-host-unreachable -spec: - selector: projectcalico.org/orchestrator == 'kubernetes' - types: - - Ingress - ingress: - - action: Allow - protocol: ICMP - icmp: - type: 3 # Destination unreachable - code: 1 # Host unreachable -``` - -## Additional resources - -For more on the ICMP match criteria, see: - -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Network policy](../../reference/resources/networkpolicy.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx deleted file mode 100644 index c035f8e7de..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Control traffic to/from endpoints using Calico network policy rules. -hide_table_of_contents: true ---- - -# Policy rules - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - -<DocCardList items={useCurrentSidebarCategory().items} /> diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx deleted file mode 100644 index 26b83b6738..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/namespace-policy.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -description: Use namespaces and namespace selectors in Calico network policy to group or separate resources. Use network policies to allow or deny traffic to/from pods that belong to specific namespaces. ---- - -# Use namespace rules in policy - -## Big picture - -Use {{prodname}} network policies to reference pods in other namespaces. - -## Value - -Kubernetes namespaces let you group/separate resources to meet a variety of use cases. For example, you can use namespaces to separate development, production, and QA environments, or allow different teams to use the same cluster. You can use namespace selectors in {{prodname}} network policies to allow or deny traffic to/from pods in specific namespaces. - -## How to - -- [Control traffic to/from endpoints in a namespace](#control-traffic-tofrom-endpoints-in-a-namespace) -- [Use Kubernetes RBAC to control namespace label assignment](#use-kubernetes-rbac-to-control-namespace-label-assignment) - -### Control traffic to/from endpoints in a namespace - -In the following example, ingress traffic is allowed to endpoints in the **namespace: production** with label **color: red**, and only from a pod in the same namespace with **color: blue**, on **port 6379**. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-6379 - namespace: production -spec: - selector: color == 'red' - ingress: - - action: Allow - protocol: TCP - source: - selector: color == 'blue' - destination: - ports: - - 6379 -``` - -To allow ingress traffic from endpoints in other namespaces, use a **namespaceSelector** in the policy rule. A namespaceSelector matches one or more namespaces based on the labels that are applied on the namespace. In the following example, ingress traffic is also allowed from endpoints with **color: blue** in namespaces with **shape: circle**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-6379 - namespace: production -spec: - selector: color == 'red' - ingress: - - action: Allow - protocol: TCP - source: - selector: color == 'blue' - namespaceSelector: shape == 'circle' - destination: - ports: - - 6379 -``` - -### Use Kubernetes RBAC to control namespace label assignment - -Network policies can be applied to endpoints using selectors that match labels on the endpoint, the endpoint's namespace, or the endpoint's service account. By applying selectors based on the endpoint's namespace, you can use Kubernetes RBAC to control which users can assign labels to namespaces. This allows you to separate groups who can deploy pods from those who can assign labels to namespaces. - -In the following example, users in the development environment can communicate only with pods that have a namespace labeled, `environment == "development"`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: restrict-development-access -spec: - namespaceSelector: 'environment == "development"' - ingress: - - action: Allow - source: - namespaceSelector: 'environment == "development"' - egress: - - action: Allow - destination: - namespaceSelector: 'environment == "development"' -``` - -## Additional resources - -- For more network policies, see [Network policy](../../reference/resources/networkpolicy.mdx) -- To apply policy to all namespaces, see [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx deleted file mode 100644 index c313a002b6..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/policy-rules-overview.mdx +++ /dev/null @@ -1,22 +0,0 @@ ---- -description: Define network connectivity for Calico endpoints using policy rules and label selectors. ---- - -# Basic rules - -## Big picture - -Use Calico policy rules and label selectors that match Calico endpoints (pods, OpenStack VMs, and host interfaces) to define network connectivity. - -## Value - -Using label selectors to identify the endpoints (pods, OpenStack VMs, host interfaces) that a policy applies to, or that should be selected by policy rules, means you can define policy without knowing the IP addresses of the endpoints. This is ideal for handling dynamic workloads with ephemeral IPs (such as Kubernetes pods). - -## How to - -Read [Get started with Calico policy](../get-started/calico-policy/calico-network-policy.mdx) and [Kubernetes policy](../get-started/kubernetes-policy/kubernetes-network-policy.mdx), which cover all the basics of using label selectors in policies to select endpoints the policies apply to, or in policy rules. 
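As a minimal sketch of the idea (the labels, port, and names below are hypothetical), the following policy selects endpoints by label and also matches the traffic source by label, so no IP addresses appear anywhere in the policy:

```yaml
apiVersion: projectcalico.org/v3
kind: NetworkPolicy
metadata:
  name: allow-frontend-to-backend
  namespace: default
spec:
  # Label selector chooses which endpoints this policy applies to.
  selector: app == 'backend'
  ingress:
    - action: Allow
      protocol: TCP
      source:
        # The rule itself also matches peers by label, not by IP address.
        selector: app == 'frontend'
      destination:
        ports: [8080]
```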
- -## Additional resources - -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Network policy](../../reference/resources/networkpolicy.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx deleted file mode 100644 index bcd327eadf..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-accounts.mdx +++ /dev/null @@ -1,118 +0,0 @@ ---- -description: Use Kubernetes service accounts in policies to validate cryptographic identities and/or manage RBAC controlled high-priority rules across teams. ---- - -# Use service accounts rules in policy - -## Big picture - -Use {{prodname}} network policy to allow/deny traffic for Kubernetes service accounts. - -## Value - -Using {{prodname}} network policy, you can leverage Kubernetes service accounts with RBAC for flexible control over how policies are applied in a cluster. For example, the security team can have RBAC permissions to: - -- Control which service accounts the developer team can use within a namespace -- Write high-priority network policies for those service accounts (that the developer team cannot override) - -The network security team can maintain full control of security, while selectively allowing developer operations where it makes sense. - -Using **Istio-enabled apps** with {{prodname}} network policy, the cryptographic identity associated with the service account is checked (along with the network identity) to achieve two-factor authentication. - -## Concepts - -### Use smallest set of permissions required - -Operations on service accounts are controlled by RBAC, so you can grant permissions only to trusted entities (code and/or people) to create, modify, or delete service accounts. To perform any operation in a workload, clients are required to authenticate with the Kubernetes API server. - -If you do not explicitly assign a service account to a pod, it uses the default ServiceAccount in the namespace. - -You should not grant broad permissions to the default service account for a namespace. If an application needs access to the Kubernetes API, create separate service accounts with the smallest set of permissions required. - -### Service account labels - -Like all other Kubernetes objects, service accounts have labels. You can use labels to create ‘groups’ of service accounts. {{prodname}} network policy lets you select workloads by their service account using: - -- An exact match on service account name -- A service account label selector expression - -## Before you begin... - -Configure unique Kubernetes service accounts for your applications. - -## How to - -- [Limit ingress traffic for workloads by service account name](#limit-ingress-traffic-for-workloads-by-service-account-name) -- [Limit ingress traffic for workloads by service account label](#limit-ingress-traffic-for-workloads-by-service-account-label) -- [Use Kubernetes RBAC to control service account label assignment](#use-kubernetes-rbac-to-control-service-account-label-assignment) - -### Limit ingress traffic for workloads by service account name - -In the following example, ingress traffic is allowed from any workload whose service account matches the names **api-service** or **user-auth-service**. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: demo-calico - namespace: prod-engineering -spec: - ingress: - - action: Allow - source: - serviceAccounts: - names: - - api-service - - user-auth-service - selector: 'app == "db"' -``` - -### Limit ingress traffic for workloads by service account label - -In the following example, ingress traffic is allowed from any workload whose service account matches the label selector, **app == web-frontend**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-web-frontend - namespace: prod-engineering -spec: - ingress: - - action: Allow - source: - serviceAccounts: - selector: 'app == "web-frontend"' - selector: 'app == "db"' -``` - -### Use Kubernetes RBAC to control service account label assignment - -Network policies can be applied to endpoints using selectors that match labels on the endpoint, the endpoint's namespace, or the endpoint's service account. By applying selectors based on the endpoint's service account, you can use Kubernetes RBAC to control which users can assign labels to service accounts. This allows you to separate groups who can deploy pods from those who can assign labels to service accounts. - -In the following example, pods with an intern service account can communicate only with pods with service accounts labeled, `role: intern`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: restrict-intern-access - namespace: prod-engineering -spec: - serviceAccountSelector: 'role == "intern"' - ingress: - - action: Allow - source: - serviceAccounts: - selector: 'role == "intern"' - egress: - - action: Allow - destination: - serviceAccounts: - selector: 'role == "intern"' -``` - -## Additional resources - -- [Network policy](../../reference/resources/networkpolicy.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx b/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx deleted file mode 100644 index ba2220ae01..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/policy-rules/service-policy.mdx +++ /dev/null @@ -1,119 +0,0 @@ ---- -description: Use Kubernetes Service names in policy rules. ---- - -# Use service rules in policy - -## Big picture - -Use {{prodname}} network policy to allow/deny traffic for Kubernetes services. - -## Value - -Using {{prodname}} network policy, you can leverage Kubernetes Service names to easily define access to Kubernetes services. Using service names in policy enables you to: - -- Allow or deny access to the Kubernetes API service. -- Reference port information already declared by the application, making it easier to keep policy up-to-date as application requirements change. - -## How to - -- [Allow access to the Kubernetes API for a specific namespace](#allow-access-to-the-kubernetes-api-for-a-specific-namespace) -- [Allow access to Kubernetes DNS for the entire cluster](#allow-access-to-kubernetes-dns-for-the-entire-cluster) -- [Allow access from a specified service](#allow-access-from-a-specified-service) - -### Allow access to the Kubernetes API for a specific namespace - -In the following example, egress traffic is allowed to the `kubernetes` service in the `default` namespace for all pods in the namespace `my-app`. This service is the typical -access point for the Kubernetes API server. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-api-access - namespace: my-app -spec: - selector: all() - egress: - - action: Allow - destination: - services: - name: kubernetes - namespace: default -``` - -Endpoint addresses and ports to allow will be automatically detected from the service. - -### Allow access to Kubernetes DNS for the entire cluster - -In the following example, a GlobalNetworkPolicy selects all pods in the cluster and applies a rule that ensures -all pods can access the Kubernetes DNS service. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-kube-dns -spec: - selector: all() - egress: - - action: Allow - destination: - services: - name: kube-dns - namespace: kube-system -``` - -:::note - -This policy also enacts a default-deny behavior for all pods, so make sure any other required application traffic is allowed by a policy. - -::: - -### Allow access from a specified service - -In the following example, ingress traffic is allowed from the `frontend-service` service in the `frontend` namespace for all pods in the namespace `backend`. -This allows all pods that back the `frontend-service` service to send traffic to all pods in the `backend` namespace. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-frontend-service-access - namespace: backend -spec: - selector: all() - ingress: - - action: Allow - source: - services: - name: frontend-service - namespace: frontend -``` - -We can also further specify the ports that the `frontend-service` service is allowed to access. The following example limits access from the `frontend-service` -service to port 80. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-frontend-service-access - namespace: backend -spec: - selector: all() - ingress: - - action: Allow - protocol: TCP - source: - services: - name: frontend-service - namespace: frontend - destination: - ports: [80] -``` - -## Additional resources - -- [Network policy](../../reference/resources/networkpolicy.mdx) -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/services/index.mdx b/calico_versioned_docs/version-3.25/network-policy/services/index.mdx deleted file mode 100644 index 9a8084c99b..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/services/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Apply Calico policy to Kubernetes node ports, and to services that are exposed externally as cluster IPs. -hide_table_of_contents: true ---- - -# Policy for Kubernetes services - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - -<DocCardList items={useCurrentSidebarCategory().items} /> diff --git a/calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx b/calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx deleted file mode 100644 index c870f71de1..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/services/kubernetes-node-ports.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -description: Restrict access to Kubernetes node ports using Calico global network policy. Follow the steps to secure the host, the node ports, and the cluster. ---- - -# Apply Calico policy to Kubernetes node ports - -## Big picture - -Restrict access to node ports to specific external clients. 
- -## Value - -Exposing services to external clients using node ports is a standard Kubernetes feature. However, if you want to restrict access to node ports to specific external clients, you need to use Calico global network policy. - -## Concepts - -### Network policy with preDNAT field - -In a Kubernetes cluster, kube-proxy will DNAT a request to the node's port and IP address to one of the pods that backs the service. For Calico global network policy to both allow normal ingress cluster traffic and deny other general ingress traffic, it must take effect before DNAT. To do this, you simply add a **preDNAT** field to a Calico global network policy. The preDNAT field: - -- Applies before DNAT -- Applies only to ingress rules -- Enforces all ingress traffic through a host endpoint, regardless of destination - The destination can be a locally hosted pod, a pod on another node, or a process running on the host. - -## Before you begin... - -For services that you want to expose to external clients, configure Kubernetes services with type **NodePort**. - -## How to - -To securely expose a Kubernetes service to external clients, you must implement all of the following steps. - -- [Allow cluster ingress traffic, but deny general ingress traffic](#allow-cluster-ingress-traffic-but-deny-general-ingress-traffic) -- [Allow local host egress traffic](#allow-local-host-egress-traffic) -- [Create host endpoints with appropriate network policy](#create-host-endpoints-with-appropriate-network-policy) -- [Allow ingress traffic to specific node ports](#allow-ingress-traffic-to-specific-node-ports) - -### Allow cluster ingress traffic but deny general ingress traffic - -In the following example, we create a global network policy to allow cluster ingress traffic (**allow-cluster-internal-ingress**): for the nodes’ IP addresses (**1.2.3.4/16**), and for pod IP addresses assigned by Kubernetes (**100.100.100.0/16**). By adding a preDNAT field, Calico global network policy is applied before regular DNAT on the Kubernetes cluster. - -In this example, we use the **selector: has(kubernetes-host)** -- so the policy is applicable to any endpoint with a **kubernetes-host** label (but you can easily specify particular nodes). - -Finally, when you specify a preDNAT field, you must also add the **applyOnForward: true** field. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-cluster-internal-ingress-only -spec: - order: 20 - preDNAT: true - applyOnForward: true - ingress: - - action: Allow - source: - nets: [1.2.3.4/16, 100.100.100.0/16] - - action: Deny - selector: has(kubernetes-host) -``` - -### Allow local host egress traffic - -We also need a global network policy to allow egress traffic through each node's external interface. Otherwise, when we define host endpoints for those interfaces, no egress traffic will be allowed from local processes (except for traffic that is allowed by the [Failsafe rules](../../reference/host-endpoints/failsafe.mdx). - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-outbound-external -spec: - order: 10 - egress: - - action: Allow - selector: has(kubernetes-host) -``` - -### Create host endpoints with appropriate network policy - -In this example, we assume that you have already defined Calico host endpoints with network policy that is appropriate for the cluster. 
(For example, you wouldn’t want a host endpoint with a “default deny all traffic to/from this host” network policy because that is counter to the goal of allowing/denying specific traffic.) For help, see [host endpoints](../../reference/resources/hostendpoint.mdx). - -All of our previously-defined global network policies have a selector that makes them applicable to any endpoint with a **kubernetes-host label**; so we will include that label in our definitions. For example, for **eth0** on **node1**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: node1-eth0 - labels: - kubernetes-host: ingress -spec: - interfaceName: eth0 - node: node1 - expectedIPs: - - INSERT_IP_HERE -``` - -When creating each host endpoint, replace `INSERT_IP_HERE` with the IP address on eth0. The `expectedIPs` field is required so that any selectors within ingress or egress rules can properly match the host endpoint. - -### Allow ingress traffic to specific node ports - -Now we can allow external access to the node ports by creating a global network policy with the preDNAT field. In this example, **ingress traffic is allowed** for any host endpoint with **port: 31852**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: GlobalNetworkPolicy -metadata: - name: allow-nodeport -spec: - preDNAT: true - applyOnForward: true - order: 10 - ingress: - - action: Allow - protocol: TCP - destination: - selector: has(kubernetes-host) - ports: [31852] - selector: has(kubernetes-host) -``` - -To make the NodePort accessible only through particular nodes, give the nodes a particular label. For example: - -```yaml -nodeport-external-ingress: true -``` - -Then, use **nodeport-external-ingress: true** as the selector of the **allow-nodeport** policy, instead of **has(kubernetes-host)**. - -## Additional resources - -- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx) -- [Host endpoints](../../reference/resources/hostendpoint.mdx) diff --git a/calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx b/calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx deleted file mode 100644 index ccb10b829c..0000000000 --- a/calico_versioned_docs/version-3.25/network-policy/services/services-cluster-ips.mdx +++ /dev/null @@ -1,193 +0,0 @@ ---- -description: Expose Kubernetes service cluster IPs over BGP using Calico, and restrict who can access them using Calico network policy. ---- - -# Apply Calico policy to services exposed externally as cluster IPs - -## Big picture - -Control access to services exposed through clusterIPs that are advertised outside the cluster using BGP. - -## Value - -{{prodname}} network policy uses standard Kubernetes Services that allow you to expose services within clusters to external clients in the following ways: - -- [Apply policy to Kubernetes nodeports](kubernetes-node-ports.mdx) -- Using cluster IPs over BGP (described in this article) - -## Concepts - -### Advertise cluster IPs outside the cluster - -A **cluster IP** is a virtual IP address that represents a Kubernetes Service. Kube Proxy on each host translates the clusterIP into a pod IP for one of the pods backing the service, acting as a reverse proxy and load balancer. - -Cluster IPs were originally designed for use within the Kubernetes cluster. {{prodname}} allows you to advertise Cluster IPs externally -- so external clients can use them to access services hosted inside the cluster. 
-This means that {{prodname}} ingress policy can be applied at **one or both** of the following locations:
-
-- Host interface, when the traffic destined for the clusterIP first ingresses the cluster
-- Pod interface of the backend pod
-
-### Traffic routing: local versus cluster modes
-
-{{prodname}} implements [Kubernetes service external traffic policy](https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip), which controls whether external traffic is routed to node-local or cluster-wide endpoints. The following table summarizes key differences between these settings. The default is **cluster mode**.
-
-| **Service setting** | **Traffic is load balanced...** | **Pros and cons** | **Required service type** |
-| ------------------- | ------------------------------- | :---------------- | ------------------------- |
-| **externalTrafficPolicy: Cluster** (default) | Across all nodes in the cluster | Equal distribution of traffic among all pods running a service.<br/><br/>Possible unnecessary network hops between nodes for ingress external traffic. When packets are rerouted to pods on another node, traffic is SNAT’d (source network address translation).<br/><br/>Destination pod can see the proxying node’s IP address rather than the actual client IP. | **ClusterIP** |
-| **externalTrafficPolicy: Local** | Across the nodes with the endpoints for the service | Avoids extra hops, so better for apps that ingress a lot of external traffic.<br/><br/>Traffic is not SNAT’d, so actual client IPs are preserved.<br/><br/>Traffic distributed among pods running a service may be imbalanced. | **LoadBalancer** (for cloud providers), or **NodePort** (for node’s static port) |
-
-## Before you begin...
-
-[Configure Calico to advertise cluster IPs over BGP](../../networking/configuring/advertise-service-ips.mdx).
-
-## How to
-
-Selecting which mode to use depends on your goals and resources. At an operational level, **local mode** simplifies policy, but load balancing may be uneven in certain scenarios. **Cluster mode** requires more work to manage clusterIPs and SNAT, and to create policies that reference specific IP addresses, but you always get even load balancing.
-
-- [Secure externally exposed cluster IPs, local mode](#secure-externally-exposed-cluster-ips-local-mode)
-- [Secure externally exposed cluster IPs, cluster mode](#secure-externally-exposed-cluster-ips-cluster-mode)
-
-### Secure externally exposed cluster IPs, local mode
-
-Using **local mode**, the original source address of external traffic is preserved, and you can define policy directly using standard {{prodname}} network policy.
-
-1. Create {{prodname}} **NetworkPolicies** or **GlobalNetworkPolicies** that select the same set of pods as your Kubernetes Service.
-1. Add rules to allow the external traffic.
-1. If desired, add rules to allow in-cluster traffic.
-
-### Secure externally exposed cluster IPs, cluster mode
-
-In the following steps, we define **GlobalNetworkPolicy** and **HostEndpoints**.
-
-#### Step 1: Verify Kubernetes Service manifest
-
-Ensure that your Kubernetes Service manifest explicitly lists the clusterIP; do not allow Kubernetes to automatically assign the clusterIP because you need it for your policies in the following steps.
-
-#### Step 2: Create global network policy at the host interface
-
-In this step, you create a **GlobalNetworkPolicy** that selects all **host endpoints**. It controls access to the cluster IP, and prevents unauthorized clients from outside the cluster from accessing it. The hosts then forward only authorized traffic.
-
-**Set policy to allow external traffic for cluster IPs**
-
-Add rules to allow the external traffic for each clusterIP. The following example allows connections to two cluster IPs. Make sure you add the **applyOnForward** and **preDNAT** fields.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-cluster-ips
-spec:
-  selector: k8s-role == 'node'
-  types:
-    - Ingress
-  applyOnForward: true
-  preDNAT: true
-  ingress:
-    # Allow 50.60.0.0/16 to access Cluster IP A
-    - action: Allow
-      source:
-        nets:
-          - 50.60.0.0/16
-      destination:
-        nets:
-          - 10.20.30.40/32 # Cluster IP A
-    # Allow 70.80.90.0/24 to access Cluster IP B
-    - action: Allow
-      source:
-        nets:
-          - 70.80.90.0/24
-      destination:
-        nets:
-          - 10.20.30.41/32 # Cluster IP B
-```
-
-**Add a rule to allow traffic destined for the pod CIDR**
-
-Without this rule, normal pod-to-pod traffic is blocked because the policy applies to forwarded traffic.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-to-pods
-spec:
-  selector: k8s-role == 'node'
-  types:
-    - Ingress
-  applyOnForward: true
-  preDNAT: true
-  ingress:
-    # Allow traffic forwarded to pods
-    - action: Allow
-      destination:
-        nets:
-          - 192.168.0.0/16 # Pod CIDR
-```
-
-**Add a rule to allow traffic destined for all host endpoints**
-
-Or, you can add rules that allow specific host traffic, including Kubernetes and {{prodname}}. Without this rule, normal host traffic is blocked.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-traffic-hostendpoints
-spec:
-  selector: k8s-role == 'node'
-  types:
-    - Ingress
-  ingress:
-    # Allow traffic to the node (not nodePorts, TCP)
-    - action: Allow
-      protocol: TCP
-      destination:
-        selector: k8s-role == 'node'
-        notPorts: ["30000:32767"] # nodePort range
-    # Allow traffic to the node (not nodePorts, UDP)
-    - action: Allow
-      protocol: UDP
-      destination:
-        selector: k8s-role == 'node'
-        notPorts: ["30000:32767"] # nodePort range
-```
-
-#### Step 3: Create a global network policy that selects pods
-
-In this step, you create a **GlobalNetworkPolicy** that selects the **same set of pods as your Kubernetes Service**. Add rules that allow host endpoints to access the service ports.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-nodes-svc-a
-spec:
-  selector: k8s-svc == 'svc-a'
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      protocol: TCP
-      source:
-        selector: k8s-role == 'node'
-      destination:
-        ports: [80, 443]
-    - action: Allow
-      protocol: UDP
-      source:
-        selector: k8s-role == 'node'
-      destination:
-        ports: [80, 443]
-```
-
-#### Step 4: (Optional) Create network policies or global network policies that allow in-cluster traffic to access the service
-
-#### Step 5: Create HostEndpoints
-
-Create HostEndpoints for the interface of each host that will receive traffic for the clusterIPs. Be sure to label them so they are selected by the policy in Step 2 (Add a rule to allow traffic destined for the pod CIDR), and the rules in Step 3.
-
-In the previous example policies, the label **k8s-role: node** is used to identify these HostEndpoints.
-
-## Additional resources
-
-- [Enable service IP advertisement](../../networking/configuring/advertise-service-ips.mdx)
-- [Defend against DoS attacks](../extreme-traffic/defend-dos-attack.mdx)
-- [Global network policy](../../reference/resources/globalnetworkpolicy.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx b/calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx
deleted file mode 100644
index b0217ce8d7..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/advertise-service-ips.mdx
+++ /dev/null
@@ -1,246 +0,0 @@
----
-description: Configure Calico to advertise Kubernetes service cluster IPs and external IPs outside the cluster using BGP.
----
-
-# Advertise Kubernetes service IP addresses
-
-## Big picture
-
-Enable {{prodname}} to advertise Kubernetes service IPs outside a cluster. {{prodname}} supports advertising a service’s cluster IPs and external IPs.
-
-## Value
-
-Typically, Kubernetes service cluster IPs are accessible only within the cluster, so external access to the service requires a dedicated load balancer or ingress controller. In cases where a service’s cluster IP is not routable, the service can be accessed using its external IP.
-
-Just as {{prodname}} supports advertising **pod IPs** over BGP, it also supports advertising Kubernetes **service IPs** outside a cluster over BGP. This avoids the need for a dedicated load balancer. This feature also supports equal cost multi-path (ECMP) load balancing across nodes in the cluster, as well as source IP address preservation for local services when you need more control.
- -## Concepts - -### BGP makes it easy - -In Kubernetes, all requests for a service are redirected to an appropriate endpoint (pod) backing that service. Because {{prodname}} uses BGP, external traffic can be routed directly to Kubernetes services by advertising Kubernetes service IPs into the BGP network. - -If your deployment is configured to peer with BGP routers outside the cluster, those routers (plus any other upstream places the routers propagate to) can send traffic to a Kubernetes service IP for routing to one of the available endpoints for that service. - -### Advertising service IPs: quick glance - -{{prodname}} implements the Kubernetes **externalTrafficPolicy** using kube-proxy to direct incoming traffic to a correct pod. Advertisement is handled differently based on the service type that you configure for your service. - -| **Service mode** | **Cluster IP advertisement** | **Traffic is...** | Source IP address is... | -| ----------------- | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------- | -| Cluster (default) | All nodes in the cluster statically advertise a route to the service CIDR. | Load balanced across nodes in the cluster using ECMP, then forwarded to appropriate pod in the service using SNAT. May incur second hop to another node, but good overall load balancing. | Obscured by SNAT | -| Local | The nodes with a pod backing the service advertise a specific route (/32 or /128) to the service's IP. | Load balanced across nodes with endpoints for the service. Avoids second hop for LoadBalancer and NodePort type services, traffic may be unevenly load balanced. (Other traffic is load balanced across nodes in the cluster.) | Preserved | - -If your {{prodname}} deployment is configured to peer with BGP routers outside the cluster, those routers - plus any further upstream places that those routers propagate to - will be able to send traffic to a Kubernetes service cluster IP, and that traffic is routed to one of the available endpoints for that service. - -### Tips for success - -- Generally, we recommend using “Local” for the following reasons: - - If any of your network policy uses rules to match by specific source IP addresses, using Local is the obvious choice because the source IP address is not altered, and the policy will still work. - - Return traffic is routed directly to the source IP because “Local” services do not require undoing the source NAT (unlike “Cluster” services). -- Cluster IP advertisement works best with a ToR that supports ECMP. Otherwise, all traffic for a given route is directed to a single node. - -## Before you begin... - -**Required** - -- [Configure BGP peering](bgp.mdx) between {{prodname}} and your network infrastructure -- For ECMP load balancing to services, the upstream routers must be configured to use BGP multipath. -- You need at least one external node outside the cluster that acts as a router, route reflector, or ToR that is peered with calico nodes inside the cluster. -- Services must be configured with the correct service mode (“Cluster” or “Local”) for your implementation. For `externalTrafficPolicy: Local`, the service must be type `LoadBalancer` or `NodePort`. 
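-
-For example, a minimal NodePort Service using local mode might look like the following sketch (the name, selector, and ports here are illustrative, not taken from this guide):
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: my-frontend # hypothetical service name
-spec:
-  type: NodePort
-  externalTrafficPolicy: Local # route only to node-local endpoints, preserving client source IPs
-  selector:
-    app: my-frontend # hypothetical pod label
-  ports:
-    - protocol: TCP
-      port: 80
-      targetPort: 8080
-```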
- -**Limitations** - -- OpenShift, versions 4.5 and 4.6 - There is a [bug](https://github.com/kubernetes/kubernetes/issues/91374) where the source IP is not preserved by NodePort services or traffic via a Service ExternalIP with externalTrafficPolicy:Local. - - OpenShift users on v4.5 or v4.6 can use this [workaround to avoid SNAT with ExternalIP](https://docs.openshift.com/container-platform/4.7/nodes/clusters/nodes-cluster-enabling-features.html): - - ``` - oc edit featuregates.config.openshift.io cluster - spec: - customNoUpgrade: - enabled: - - ExternalPolicyForExternalIP - ``` - - Kubernetes users on version v1.18 or v1.19 can enable source IP preservation for NodePort services using the ExternalPolicyForExternalIP feature gate. - - Source IP preservation for NodePort and services and ExternalIPs is enabled by default in OpenShift v4.7+, and Kubernetes v1.20+. - -## How to - -- [Advertise service cluster IP addresses](#advertise-service-cluster-ip-addresses) -- [Advertise service external IP addresses](#advertise-service-external-ip-addresses) -- [Advertise service load balancer IP addresses](#advertise-service-load-balancer-ip-addresses) -- [Exclude certain nodes from advertisement](#exclude-certain-nodes-from-advertisement) - -### Advertise service cluster IP addresses - -1. Determine the service cluster IP range. (Or ranges, if your cluster is [dual stack](../ipam/ipv6.mdx).) - - The range(s) for your cluster can be inferred from the `--service-cluster-ip-range` option passed to the Kubernetes API server. For help, see the [Kubernetes API server reference guide](https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/). - -1. Check to see if you have a default BGPConfiguration. - - ```bash - calicoctl get bgpconfig default - ``` - -1. Based on above results, update or create a BGPConfiguration. - - **Update default BGPConfiguration** - Patch the BGPConfiguration using the following command, using your own service cluster IP CIDR in place of "10.0.0.0/24": - - ```bash - calicoctl patch bgpconfig default --patch \ - '{"spec": {"serviceClusterIPs": [{"cidr": "10.0.0.0/24"}]}}' - ``` - - **Create default BGPConfiguration** - Use the following sample command to create a default BGPConfiguration. Add your CIDR blocks, covering the cluster IPs to be advertised, in the `serviceClusterIPs` field, for example: - - ```bash - calicoctl create -f - < 100). - -For a deeper look at common on-premises deployment models, see [Calico over IP Fabrics](../../reference/architecture/design/l2-interconnect-fabric.mdx). - -## Before you begin... - -[calicoctl](../../operations/calicoctl/install.mdx) must be installed and configured. - -## How to - -:::note - -Significantly changing {{prodname}}'s BGP topology, such as changing from full-mesh to peering with ToRs, may result in temporary loss of pod network connectivity during the reconfiguration process. It is recommended to only make such changes during a maintenance window. 
- -::: - -- [Configure a global BGP peer](#configure-a-global-bgp-peer) -- [Configure a per-node BGP peer](#configure-a-per-node-bgp-peer) -- [Configure a node to act as a route reflector](#configure-a-node-to-act-as-a-route-reflector) -- [Disable the default BGP node-to-node mesh](#disable-the-default-bgp-node-to-node-mesh) -- [Change from node-to-node mesh to route reflectors without any traffic disruption](#change-from-node-to-node-mesh-to-route-reflectors-without-any-traffic-disruption) -- [View BGP peering status for a node](#view-bgp-peering-status-for-a-node) -- [Change the default global AS number](#change-the-default-global-as-number) -- [Change AS number for a particular node](#change-as-number-for-a-particular-node) - -### Configure a global BGP peer - -Global BGP peers apply to all nodes in your cluster. This is useful if your network topology includes BGP speakers that will be peered with every {{prodname}} node in your deployment. - -The following example creates a global BGP peer that configures every {{prodname}} node to peer with **192.20.30.40** in AS **64567**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: my-global-peer -spec: - peerIP: 192.20.30.40 - asNumber: 64567 -``` - -### Configure a per-node BGP peer - -Per-node BGP peers apply to one or more nodes in the cluster. You can choose which nodes by specifying the node’s name exactly, or using a label selector. - -The following example creates a BGPPeer that configures every {{prodname}} node with the label, **rack: rack-1** to peer with **192.20.30.40** in AS **64567**. - -```yaml -apiVersion: projectcalico.org/v3 -kind: BGPPeer -metadata: - name: rack1-tor -spec: - peerIP: 192.20.30.40 - asNumber: 64567 - nodeSelector: rack == 'rack-1' -``` - -### Configure a node to act as a route reflector - -{{prodname}} nodes can be configured to act as route reflectors. To do this, each node that you want to act as a route reflector must have a cluster ID - typically an unused IPv4 address. - -To configure a node to be a route reflector with cluster ID 244.0.0.1, run the following command. - - - - -```bash -kubectl annotate node my-node projectcalico.org/RouteReflectorClusterID=244.0.0.1 -``` - - - - -```bash -calicoctl patch node my-node -p '{"spec": {"bgp": {"routeReflectorClusterID": "244.0.0.1"}}}' -``` - - - - -Typically, you will want to label this node to indicate that it is a route reflector, allowing it to be easily selected by a BGPPeer resource. You can do this with kubectl. For example: - -```bash -kubectl label node my-node route-reflector=true -``` - -Now it is easy to configure route reflector nodes to peer with each other and other non-route-reflector nodes using label selectors. For example: - -```yaml -kind: BGPPeer -apiVersion: projectcalico.org/v3 -metadata: - name: peer-with-route-reflectors -spec: - nodeSelector: all() - peerSelector: route-reflector == 'true' -``` - -:::note - -Adding `routeReflectorClusterID` to a node spec will remove it from the node-to-node mesh immediately, tearing down the -existing BGP sessions. Adding the BGP peering will bring up new BGP sessions. This will cause a short (about 2 seconds) -disruption to dataplane traffic of workloads running in the nodes where this happens. To avoid this, make sure no -workloads are running on the nodes, by provisioning new nodes or by running `kubectl drain` on the node (which may -itself cause a disruption as workloads are drained). 
- -::: - -### Disable the default BGP node-to-node mesh - -The default **node-to-node BGP mesh** may be turned off to enable other BGP topologies. To do this, modify the default **BGP configuration** resource. - -Run the following command to disable the BGP full-mesh: - -```bash -calicoctl patch bgpconfiguration default -p '{"spec": {"nodeToNodeMeshEnabled": false}}' -``` - -:::note - -If the default BGP configuration resource does not exist, you need to create it first. See [BGP configuration](../../reference/resources/bgpconfig.mdx) for more information. - -::: - -:::note - -Disabling the node-to-node mesh will break pod networking until/unless you configure replacement BGP peerings using BGPPeer resources. -You may configure the BGPPeer resources before disabling the node-to-node mesh to avoid pod networking breakage. - -::: - -### Change from node-to-node mesh to route reflectors without any traffic disruption - -Switching from node-to-node BGP mesh to BGP route reflectors involves tearing down BGP sessions and bringing up new ones. This causes a short -dataplane network disruption (of about 2 seconds) for workloads running on the nodes in the cluster. To avoid this, you may provision -route reflector nodes and bring their BGP sessions up before tearing down the node-to-node mesh sessions. - -Follow these steps to do so: - -1. [Provision new nodes to be route reflectors.](#configure-a-node-to-act-as-a-route-reflector) The nodes [should not be schedulable](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) - and they should have `routeReflectorClusterID` in their spec. These won't be part of the existing - node-to-node BGP mesh, and will be the route reflectors when the mesh is disabled. These nodes should also have a label like - `route-reflector` to select them for the BGP peerings. [Alternatively](https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/), - you can drain workloads from existing nodes in your cluster by running `kubectl drain ` to configure them to be route reflectors, - but this will cause a disruption on the workloads on those nodes as they are drained. - -2. Also set up a [BGPPeer](#configure-a-node-to-act-as-a-route-reflector) spec to configure route reflector nodes to peer with each other and other non-route-reflector nodes - using label selectors. - -3. Wait for these peerings to be established. This can be [verified](#view-bgp-peering-status-for-a-node) by running `sudo calicoctl node status` on the nodes. Alternatively, you can create a [`CalicoNodeStatus` resource](../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node. - -4. [Disable the BGP node-to-node mesh for the cluster.](#disable-the-default-bgp-node-to-node-mesh) - -5. If you did drain workloads from the nodes or created them as unschedulable, mark the nodes as schedulable again (e.g. by running `kubectl uncordon `). - -### View BGP peering status for a node - -Create a [CalicoNodeStatus resource](../../reference/resources/caliconodestatus.mdx) to monitor BGP session status for the node. - -Alternatively, you can run the `calicoctl node status` command on a given node to learn more about its BGP status. - -:::note - -This command communicates with the local {{prodname}} agent, so you must execute it on the node whose status you are attempting to view. - -::: - -### Change the default global AS number - -By default, all Calico nodes use the 64512 autonomous system, unless a per-node AS has been specified for the node. 
You can change the global default for all nodes by modifying the default **BGPConfiguration** resource. The following example command sets the global default AS number to **64513**. - -```bash -calicoctl patch bgpconfiguration default -p '{"spec": {"asNumber": "64513"}}' -``` - -:::note - -If the default BGP configuration resource does not exist, you need to create it first. See [BGP configuration](../../reference/resources/bgpconfig.mdx) for more information. - -::: - -### Change AS number for a particular node - -You can configure an AS for a particular node by modifying the node object using `calicoctl`. For example, the following command changes the node named **node-1** to belong to **AS 64514**. - -```bash -calicoctl patch node node-1 -p '{"spec": {"bgp": {"asNumber": "64514"}}}' -``` - -## Additional resources - -- [Node resource](../../reference/resources/node.mdx) -- [BGP configuration resource](../../reference/resources/bgpconfig.mdx) -- [BGP peer resource](../../reference/resources/bgppeer.mdx) diff --git a/calico_versioned_docs/version-3.25/networking/configuring/index.mdx b/calico_versioned_docs/version-3.25/networking/configuring/index.mdx deleted file mode 100644 index d37320632e..0000000000 --- a/calico_versioned_docs/version-3.25/networking/configuring/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure Calico networking options, including overlay, non-overlay, BGP, service advertisement, MTU, NAT, and using kube-proxy in IPVS mode. -hide_table_of_contents: true ---- - -# Configure Networking - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx b/calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx deleted file mode 100644 index aded7da66e..0000000000 --- a/calico_versioned_docs/version-3.25/networking/configuring/mtu.mdx +++ /dev/null @@ -1,142 +0,0 @@ ---- -description: Optimize network performance for workloads by configuring the MTU in Calico to best suit your underlying network. ---- - -# Configure MTU to maximize network performance - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Configure the maximum transmission unit (MTU) for your {{prodname}} environment. - -## Value - -Optimize network performance for workloads by configuring the MTU in {{prodname}} to best suit your underlying network. - -Increasing the MTU can improve performance, and decreasing the MTU can resolve packet loss and fragmentation problems when it is too high. - -## Concepts - -### MTU and {{prodname}} defaults - -The maximum transmission unit (MTU) setting determines the largest packet size that can be transmitted through your network. MTU is configured on the veth attached to each workload, and tunnel devices (if you enable IP in IP, VXLAN, or WireGuard). - -In general, maximum performance is achieved by using the highest MTU value that does not cause fragmentation or dropped packets on the path. Maximum bandwidth increases and CPU consumption may drop for a given traffic rate. The improvement is often more significant when pod to pod traffic is being encapsulated (IP in IP, VXLAN, or WireGuard), and splitting and combining such traffic cannot be offloaded to your NICs. - -By default, {{prodname}} will auto-detect the correct MTU for your cluster based on node configuration and enabled networking modes. 
This guide explains how you can override auto-detection -of MTU by providing an explicit value if needed. - -To ensure auto-detection of MTU works correctly, make sure that the correct encapsulation modes are set in your [felix configuration](../../reference/resources/felixconfig.mdx). Disable any unused encapsulations (`vxlanEnabled`, `ipipEnabled`, `wireguardEnabled` and `wireguardEnabledV6`) in your felix configuration to ensure that auto-detection can pick the optimal MTU for your cluster. - -## Before you begin... - -For help on using IP in IP and/or VXLAN overlays, see [Configure overlay networking](vxlan-ipip.mdx). - -For help on using WireGuard encryption, see [Configure WireGuard encryption](../../network-policy/encrypt-cluster-pod-traffic.mdx). - -## How to - -- [Determine MTU size](#determine-mtu-size) -- [Configure MTU](#configure-mtu) -- [View current tunnel MTU values](#view-current-tunnel-mtu-values) - -### Determine MTU size - -The following table lists common MTU sizes for {{prodname}} environments. Because MTU is a global property of the network path between endpoints, you should set the MTU to the minimum MTU of any path that packets may take. - -**Common MTU sizes** - -| Network MTU | {{prodname}} MTU | {{prodname}} MTU with IP-in-IP (IPv4) | {{prodname}} MTU with VXLAN (IPv4) | {{prodname}} MTU with VXLAN (IPv6) | {{prodname}} MTU with WireGuard (IPv4) | {{prodname}} MTU with WireGuard (IPv6) | -| ---------------------- | ---------------- | ------------------------------------- | ---------------------------------- | ---------------------------------- | -------------------------------------- | -------------------------------------- | -| 1500 | 1500 | 1480 | 1450 | 1430 | 1440 | 1420 | -| 9000 | 9000 | 8980 | 8950 | 8930 | 8940 | 8920 | -| 1500 (AKS) | 1500 | 1480 | 1450 | 1430 | 1340 | 1320 | -| 1460 (GCE) | 1460 | 1440 | 1410 | 1390 | 1400 | 1380 | -| 9001 (AWS Jumbo) | 9001 | 8981 | 8951 | 8931 | 8941 | 8921 | -| 1450 (OpenStack VXLAN) | 1450 | 1430 | 1400 | 1380 | 1390 | 1370 | - -**Recommended MTU for overlay networking** - -The extra overlay header used in IP in IP, VXLAN and WireGuard protocols, reduces the minimum MTU by the size of the header. (IP in IP uses a 20-byte header, IPv4 VXLAN uses a 50-byte header, IPv6 VXLAN uses a 70-byte header, IPv4 WireGuard uses a [60-byte header](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002201.html) and IPv6 WireGuard uses an 80-byte header). - -When using AKS, the underlying network has an [MTU of 1400](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-network-tcpip-performance-tuning#azure-and-vm-mtu), even though the network interface will have an MTU of 1500. -WireGuard sets the Don't Fragment (DF) bit on its packets, and so the MTU for WireGuard on AKS needs to be set to 60 bytes below (or 80 bytes for IPv6) the 1400 MTU of the underlying network to avoid dropped packets. - -If you have a mix of WireGuard and either IP in IP or VXLAN in your cluster, you should configure the MTU to be the smallest of the values of each encap type. The reason for this is that only WireGuard encapsulation will be used between any nodes where both have WireGuard enabled, and IP in IP or VXLAN will then be used between any nodes where both do not have WireGuard enabled. This could be the case if, for example, you are in the process of installing WireGuard on your nodes. 
- -Therefore, we recommend the following: - -- If you use IPv4 WireGuard encryption anywhere in your pod network, configure MTU size as “physical network MTU size minus 60”. -- If you use IPv6 WireGuard encryption anywhere in your pod network, configure MTU size as “physical network MTU size minus 80”. -- If you don't use WireGuard, but use IPv4 VXLAN anywhere in your pod network, configure MTU size as “physical network MTU size minus 50”. -- If you don't use WireGuard, but use IPv6 VXLAN anywhere in your pod network, configure MTU size as “physical network MTU size minus 70”. -- If you don't use WireGuard, but use only IP in IP, configure MTU size as “physical network MTU size minus 20” -- Set the workload endpoint MTU and the tunnel MTUs to the same value (so all paths have the same MTU) - -**eBPF mode** - -Implementation of NodePorts uses VXLAN tunnel to hand off packets from one node to another, therefore VXLAN MTU setting -is used to set the MTUs of workloads (veths) and should be “physical network MTU size minus 50” (see above). - -**MTU for flannel networking** - -When using flannel for networking, the MTU for network interfaces should match the MTU of the flannel interface. - -- If using flannel with VXLAN, use the “{{prodname}} MTU with VXLAN” column in the table above for common sizes. - -### Configure MTU - -:::note - -The updated MTU used by {{prodname}} only applies to new workloads. - -::: - -Instructions for configuring MTU vary based on install method. - - - - -For Operator installations, edit the {{prodname}} operator `Installation` resource to set the `mtu` -field in the `calicoNetwork` section of the `spec`. For example: - -```bash -kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}' -``` - -Similarly, for OpenShift: - -```bash -oc patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"mtu":1440}}}' -``` - - - - -For manifest based installations (i.e. ones that do not use the operator) edit the `calico-config` ConfigMap. For example: - -```bash -kubectl patch configmap/calico-config -n kube-system --type merge \ - -p '{"data":{"veth_mtu": "1440"}}' -``` - -After updating the ConfigMap, perform a rolling restart of all calico/node pods. For example: - -```bash -kubectl rollout restart daemonset calico-node -n kube-system -``` - - - - -### View current tunnel MTU values - -To view the current tunnel size, use the following command: - -`ip link show` - -The IP in IP tunnel appears as tunlx (for example, tunl0), along with the MTU size. For example: - -![Tunnel MTU](/img/calico/tunnel.png) diff --git a/calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx b/calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx deleted file mode 100644 index eafb428d66..0000000000 --- a/calico_versioned_docs/version-3.25/networking/configuring/pod-mac-address.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: Specify the MAC address for a pod instead of allowing the operating system to assign one ---- - -# Use a specific MAC address for a pod - -## Big picture - -Choose the MAC address for a pod instead of allowing the operating system to assign one. - -## Value - -Some applications bind software licenses to networking interface MAC addresses. - -## Concepts - -### Container MAC address - -The MAC address configured by the annotation described here will be visible from within the container on the eth0 interface. 
Since it is isolated to the container it will not collide with any other MAC addresses assigned to other pods on the same node. - -## Before you begin... - -Your cluster must be using Calico CNI to use this feature. - -[Configuring the Calico CNI Plugins](../../reference/configure-cni-plugins.mdx) - -## How to - -Annotate the pod with cni.projectcalico.org/hwAddr set to the desired MAC address. For example: - -``` - "cni.projectcalico.org/hwAddr": "1c:0c:0a:c0:ff:ee" -``` - -The annotation must be present when the pod is created; adding it later has no effect. diff --git a/calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx b/calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx deleted file mode 100644 index cfcb69775c..0000000000 --- a/calico_versioned_docs/version-3.25/networking/configuring/sidecar-acceleration.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -description: Use Calico to accelerate network performance of traffic through the Istio Envoy sidecar using eBPF. ---- - -# Accelerate Istio network performance - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Use Calico to accelerate network performance of routing network traffic via Istio Envoy sidecar. - -:::caution - -This feature is experimental and should not be used in production clusters. It uses a recent Linux kernel feature (eBPF SOCKMAP), which our testing confirms requires upstream kernel enhancements to reliably and securely support production clusters. We are contributing fixes to the kernel where needed. - -::: - -## Value - -Istio directs all application network traffic through an Envoy sidecar in each pod, which introduces network overhead for all traffic. Calico can greatly reduce this network overhead by automatically optimizing the Linux network path for this traffic. - -## Concepts - -### Sidecar acceleration - -The Sidecar acceleration process bypasses several layers of kernel networking, allowing data to flow between the sockets unobstructed. This makes the Envoy proxy (sidecar) to container network path as fast and efficient as possible. - -## Before you begin... - -- [Enable application layer policy](../../network-policy/istio/app-layer-policy.mdx) -- Verify that hosts installed with Calico are using Linux kernel 4.19 and above - -### Sidecar acceleration: experimental technology - -The sidecar app acceleration feature is disabled by default in Calico because the technology is currently not production ready. Use only in test environments until the technology is hardened for production security. - -## How to - -To enable sidecar acceleration for Istio-enabled apps using Calico: - - - - -```bash -kubectl patch felixconfiguration default --type merge --patch '{"spec":{"sidecarAccelerationEnabled": true}}' -``` - -You should see an output like below: - -``` -felixconfiguration.projectcalico.org/default patched -``` - - - - -```bash -calicoctl patch felixconfiguration default --patch '{"spec":{"sidecarAccelerationEnabled": true}}' -``` - -You should see an output like below: - -``` -Successfully patched 1 'FelixConfiguration' resource -``` - - - - -That’s it! Network traffic that is routed between apps and the Envoy sidecar is automatically accelerated at this point. Note that if you have an existing Istio/Calico implementation and you enable sidecar acceleration, existing connections do not benefit from acceleration. 
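-
-If you prefer declarative configuration over patching, the same setting can be expressed as a FelixConfiguration manifest. The following is a sketch only; it assumes your cluster uses the `default` FelixConfiguration and that you merge this field into the existing spec rather than overwriting other settings:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: FelixConfiguration
-metadata:
-  name: default
-spec:
-  # Experimental: accelerate traffic between the Envoy sidecar and its app container
-  sidecarAccelerationEnabled: true
-```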
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx b/calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx deleted file mode 100644 index c1d1dd029c..0000000000 --- a/calico_versioned_docs/version-3.25/networking/configuring/use-ipvs.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -description: Use IPVS kube-proxy for performance improvements. ---- - -# Use IPVS kube-proxy - -## Big picture - -Use IPVS kube-proxy mode for load balancing traffic across pods. - -## Value - -No matter where you are on your journey with container networking, iptables will serve you well. However, if you are scaling above 1,000 services, it’s worth looking at potential performance improvements using kube-proxy IPVS mode. - -## Concepts - -### Kubernetes kube-proxy - -Kube-proxy process handles everything related to Services on each node. It ensures that connections to the service cluster IP and port go to a pod that backs the service. If backed by more than one pod, kube-proxy load-balances traffic across pods. - -Kube-proxy runs in three modes: **userspace**, **iptables**, and **ipvs**. (Userspace is old, slow and not recommended.) Here’s a quick summary of iptables and ipvs modes. - -| **kube-proxy mode** | **Designed to be...** | **Linux kernel hooks** | **Connection processing overhead...** | -| ------------------- | ---------------------------------------------------------------------------------------------------------- | -------------------------------------- | ------------------------------------------- | -| iptables | An efficient firewall | NAT pre-routing using sequential rules | Grows proportional to cluster size | -| ipvs | A load balancer with scheduling options like round-robin, shortest-expected delay, least connections, etc. | Optimized lookup routine | Stays constant, independent of cluster size | - -If you are wondering about the performance differences between iptables and ipvs, the answers are definitely not straightforward. For a comparison between iptables (including {{prodname}}’s own use of iptables) and ipvs modes, see [Comparing kube-proxy modes: iptables or IPVS?](https://www.projectcalico.org/comparing-kube-proxy-modes-iptables-or-ipvs/). - -### IPVS mode and NodePort ranges - -Kube-proxy IPVS mode supports NodePort services and cluster IPs. {{prodname}} also uses NodePorts for routing traffic to the cluster, including the same default Kubernetes NodePort range (30000:32767). If you change your default NodePort range in Kubernetes, you must also change it on {{prodname}} to maintain ipvs coverage. - -### iptables: when to change mark bits - -To police traffic in IPVS mode, {{prodname}} uses additional iptables mark bits to store an ID for each local {{prodname}} endpoint. If you are planning to run more than 1,022 pods per host with IPVS enabled, you may need to adjust the mark bit size using the `IptablesMarkMask` parameter in {{prodname}} [FelixConfiguration](../../reference/felix/configuration.mdx#ipvs-bits). - -### {{prodname}} auto detects ipvs mode - -When {{prodname}} detects that kube-proxy is running in IPVS mode (during or after installation), IPVS support is automatically activated. Detection happens when calico-node starts up, so if you change kube-proxy's mode in a running cluster, you will need to restart your calico-node instances. - -## Before you begin... 
-
-**Required**
-
-- kube-proxy is configured to use IPVS mode
-- Services for IPVS mode are type NodePort
-
-## How to
-
-As previously discussed, there is nothing you need to do in {{prodname}} to use IPVS mode; the mode is automatically detected. However, if your default Kubernetes NodePort range changes, use the following instructions to update the {{prodname}} node port range to stay in sync.
-
-### Change {{prodname}} default nodeport range
-
-In the FelixConfiguration resource, change the configuration parameter for the default node port range (`KubeNodePortRange`) in {{prodname}} to match your new default range in Kubernetes. For help, see [FelixConfiguration](../../reference/felix/configuration.mdx).
diff --git a/calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx b/calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx
deleted file mode 100644
index 6277ddbdbf..0000000000
--- a/calico_versioned_docs/version-3.25/networking/configuring/vxlan-ipip.mdx
+++ /dev/null
@@ -1,156 +0,0 @@
----
-description: Configure Calico to use IP in IP or VXLAN overlay networking so the underlying network doesn’t need to understand pod addresses.
----
-
-# Overlay networking
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Enable inter workload communication across networks that are not aware of workload IPs.
-
-## Value
-
-In general, we recommend running Calico without network overlay/encapsulation. This gives you the highest performance and simplest network; the packet that leaves your workload is the packet that goes on the wire.
-
-However, selectively using overlays/encapsulation can be useful when running on top of an underlying network that cannot easily be made aware of workload IPs. A common example is if you are using Calico networking in AWS across multiple VPCs/subnets. In this case, Calico can selectively encapsulate only the traffic that is routed between the VPCs/subnets, and run without encapsulation within each VPC/subnet. You might also decide to run your entire Calico network with encapsulation as an overlay network -- as a quick way to get started without setting up BGP peering or other routing information in your underlying network.
-
-## Concepts
-
-### Routing workload IP addresses
-
-Networks become aware of workload IP addresses through layer 3 routing techniques like static routes or BGP route distribution, or layer 2 address learning. As such, they can route unencapsulated traffic to the right host for the endpoint that is the ultimate destination. However, not all networks are able to route workload IP addresses. For example, public cloud environments where you don’t own the hardware, AWS across VPC subnet boundaries, and other scenarios where you cannot peer Calico over BGP to the underlay, or easily configure static routes. This is why Calico supports encapsulation, so you can send traffic between workloads without requiring the underlying network to be aware of workload IP addresses.
-
-### Encapsulation types
-
-Calico supports two types of encapsulation: VXLAN and IP in IP. VXLAN is supported in some environments where IP in IP is not (for example, Azure).
VXLAN has a slightly higher per-packet overhead because the header is larger, but unless you are running very network intensive workloads the difference is not something you would typically notice. The other small difference between the two types of encapsulation is that Calico's VXLAN implementation does not use BGP, whereas Calico's IP in IP implementation uses BGP between Calico nodes. - -### Cross-subnet - -Encapsulation of workload traffic is typically required only when traffic crosses a router that is unable to route workload IP addresses on its own. Calico can perform encapsulation on: all traffic, no traffic, or only on traffic that crosses a subnet boundary. - -## Before you begin - -**Not supported** - -- OpenStack - -**Limitations** - -- IP in IP supports only IPv4 addresses -- VXLAN in IPv6 is only supported for kernel versions ≥ 4.19.1 or redhat kernel version ≥ 4.18.0 - -## How to - -- [Configure default IP pools at install time](#configure-default-ip-pools-at-install-time) -- [Configure IP in IP encapsulation for only cross-subnet traffic](#configure-ip-in-ip-encapsulation-for-only-cross-subnet-traffic) -- [Configure IP in IP encapsulation for all inter workload traffic](#configure-ip-in-ip-encapsulation-for-all-inter-workload-traffic) -- [Configure VXLAN encapsulation for only cross-subnet traffic](#configure-vxlan-encapsulation-for-only-cross-subnet-traffic) -- [Configure VXLAN encapsulation for all inter workload traffic](#configure-vxlan-encapsulation-for-all-inter-workload-traffic) - -### Best practice - -Calico has an option to selectively encapsulate only traffic that crosses subnet boundaries. We recommend using the **cross-subnet** option with IP in IP or VXLAN to minimize encapsulation overhead. Cross-subnet mode provides better performance in AWS multi-AZ deployments, Azure VNETs, and on networks where routers are used to connect pools of nodes with L2 connectivity. - -Be aware that switching encapsulation modes can cause disruption to in-progress connections. Plan accordingly. - -### Configure default IP pools at install time - -Default IP pools are configured at install-time automatically by Calico. You can configure these default IP pools based on install method. - - - - -For operator managed clusters, you can configure encapsulation in the IP pools section of the default Installation. For example, the following installation snippet will enable VXLAN across subnets. - -```yaml -kind: Installation -apiVersion: operator.tigera.io/v1 -metadata: - name: default -spec: - calicoNetwork: - ipPools: - - cidr: 192.168.0.0/16 - encapsulation: VXLANCrossSubnet -``` - - - - -For manifest installations of Calico, you can control the default IP pool encapsulation mode using the `CALICO_IPV4POOL_VXLAN` and `CALICO_IPV4POOL_IPIP` (and `CALICO_IPV6POOL_VXLAN` for IPv6) environment variables in the environment of the `calico-node` daemon set. - - - - -### Configure IP in IP encapsulation for only cross-subnet traffic - -IP in IP encapsulation can be performed selectively, and only for traffic crossing subnet boundaries. - -To enable this feature, set `ipipMode` to `CrossSubnet`. 
- -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-ipip-cross-subnet-1 -spec: - cidr: 192.168.0.0/16 - ipipMode: CrossSubnet - natOutgoing: true -``` - -### Configure IP in IP encapsulation for all inter workload traffic - -With `ipipMode` set to `Always`, Calico routes traffic using IP in IP for all traffic originating from a Calico enabled-host, to all Calico networked containers and VMs within the IP pool. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-ipip-1 -spec: - cidr: 192.168.0.0/16 - ipipMode: Always - natOutgoing: true -``` - -### Configure VXLAN encapsulation for only cross subnet traffic - -VXLAN encapsulation can be performed selectively, and only for traffic crossing subnet boundaries. - -To enable this feature, set `vxlanMode` to `CrossSubnet`. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-vxlan-cross-subnet-1 -spec: - cidr: 192.168.0.0/16 - vxlanMode: CrossSubnet - natOutgoing: true -``` - -### Configure VXLAN encapsulation for all inter workload traffic - -With `vxlanMode` set to `Always`, Calico routes traffic using VXLAN for all traffic originating from a Calico enabled host, to all Calico networked containers and VMs within the IP pool. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: ippool-vxlan-1 -spec: - cidr: 192.168.0.0/16 - vxlanMode: Always - natOutgoing: true -``` - -If you use only VXLAN pools, BGP networking is not required. You can disable BGP to reduce the moving parts in your cluster by [Customizing the manifests](../../getting-started/kubernetes/self-managed-onprem/config-options.mdx). Set the `calico_backend` setting to `vxlan`, and disable the BGP readiness check. - -## Additional resources - -For details on IP pool resource options, see [IP pool](../../reference/resources/ippool.mdx). diff --git a/calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx b/calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx deleted file mode 100644 index 7fe18c0fe3..0000000000 --- a/calico_versioned_docs/version-3.25/networking/configuring/workloads-outside-cluster.mdx +++ /dev/null @@ -1,64 +0,0 @@ ---- -description: Configure networking to perform outbound NAT for connections from pods to outside of the cluster. ---- - -# Configure outgoing NAT - -## Big picture - -Configure {{prodname}} networking to perform outbound NAT for connections from pods to outside of the cluster. {{prodname}} optionally source NATs the pod IP to the node IP. - -## Value - -The {{prodname}} NAT outbound connection option is flexible; it can be enabled, disabled, and applied to {{prodname}} IP pools with public IPs, private IPs, or a specific range of IP addresses. This article describes some use cases for enabling and disabling outgoing NAT. - -## Concepts - -### {{prodname}} IP pools and NAT - -When a pod with an IP address in the pool initiates a network connection to an IP address to outside of {{prodname}}’s IP pools, the outgoing packets will have their source IP address changed from the pod IP address to the node IP address using SNAT (Source Network Address Translation). Any return packets on the connection automatically get this change reversed before being passed back to the pod. 
- -### Enable NAT: for pods with IP addresses that are not routable beyond the cluster - -A common use case for enabling NAT outgoing, is to allow pods in an overlay network to connect to IP addresses outside of the overlay, or pods with private IP addresses to connect to public IP addresses outside the cluster/the internet (subject to network policy allowing the connection, of course). When NAT is enabled, traffic is NATed from pods in that pool to any destination outside of all other {{prodname}} IP pools. - -### Disable NAT: For on-premises deployments using physical infrastructure - -If you choose to implement {{prodname}} networking with [BGP peered with your physical network infrastructure](bgp.mdx), you can use your own infrastructure to NAT traffic from pods to the internet. In this case, you should disable the {{prodname}} `natOutgoing` option. For example, if you want your pods to have public internet IPs, you should: - -- Configure {{prodname}} to peer with your physical network infrastructure -- Create an IP pool with public IP addresses for those pods that are routed to your network with NAT disabled (`natOutgoing: false`) -- Verify that other network equipment does not NAT the pod traffic - -## How to - -- [Create an IP pool with NAT outgoing enabled](#create-an-ip-pool-with-nat-outgoing-enabled) -- [Use additional IP pools to specify addresses that can be reached without NAT](#use-additional-ip-pools-to-specify-addresses-that-can-be-reached-without-nat) - -### Create an IP pool with NAT outgoing enabled - -In the following example, we create a {{prodname}} IPPool with natOutgoing enabled. Outbound NAT is performed locally on the node where each workload in the pool is hosted. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: default-ipv4-ippool -spec: - cidr: 192.168.0.0/16 - natOutgoing: true -``` - -### Use additional IP pools to specify addresses that can be reached without NAT - -Because {{prodname}} performs outgoing NAT only when connecting to an IP address that is not in a {{prodname}} IPPool, you can create additional IPPools that are not used for pod IP addresses, but prevent NAT to certain CIDR blocks. This is useful if you want nodes to NAT traffic to the internet, but not to IPs in certain internal ranges. For example, if you did not want to NAT traffic from pods to 10.0.0.0/8, you could create the following pool. You must ensure that the network between the cluster and 10.0.0.0/8 can route pod IPs. - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: no-nat-10.0.0.0-8 -spec: - cidr: 10.0.0.0/8 - disabled: true -``` diff --git a/calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx b/calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx deleted file mode 100644 index 3a0a8f1d56..0000000000 --- a/calico_versioned_docs/version-3.25/networking/determine-best-networking.mdx +++ /dev/null @@ -1,288 +0,0 @@ ---- -description: Learn about the different networking options Calico supports so you can choose the best option for your needs. ---- - -# Determine best networking option - -## Big picture - -Learn about the different networking options {{prodname}} supports so you can choose the best option for your needs. - -## Value - -{{prodname}}’s flexible modular architecture supports a wide range of deployment options, so you can select the best networking approach for your specific environment and needs. 
-This includes the ability to run with a variety of CNI and IPAM plugins, and underlying network types, in non-overlay or overlay modes, with or without BGP.
-
-## Concepts
-
-If you want to fully understand the network choices available to you, we recommend you make sure you are familiar with and understand the following concepts. If you would prefer to skip the learning and get straight to the choices and recommendations, you can jump ahead to [Networking Options](#networking-options).
-
-### Kubernetes networking basics
-
-The Kubernetes network model defines a “flat” network in which:
-
-- Every pod gets its own IP address.
-- Pods on any node can communicate with all pods on all other nodes without NAT.
-
-This creates a clean, backwards-compatible model where pods can be treated much like VMs or physical hosts from the perspectives of port allocation, naming, service discovery, load balancing, application configuration, and migration. Network segmentation can be defined using network policies to restrict traffic within these base networking capabilities.
-
-Within this model there’s quite a lot of flexibility for supporting different networking approaches and environments. The details of exactly how the network is implemented depend on the combination of CNI, network, and cloud provider plugins being used.
-
-### CNI plugins
-
-CNI (Container Network Interface) is a standard API which allows different network implementations to plug into Kubernetes. Kubernetes calls the API any time a pod is being created or destroyed. There are two types of CNI plugins:
-
-- CNI network plugins: responsible for adding or deleting pods to/from the Kubernetes pod network. This includes creating/deleting each pod’s network interface and connecting/disconnecting it to the rest of the network implementation.
-- CNI IPAM plugins: responsible for allocating and releasing IP addresses for pods as they are created or deleted. Depending on the plugin, this may include allocating one or more ranges of IP addresses (CIDRs) to each node, or obtaining IP addresses from an underlying public cloud’s network to allocate to pods.
-
-### Cloud provider integrations
-
-Kubernetes cloud provider integrations are cloud-specific controllers that can configure the underlying cloud network to help provide Kubernetes networking. Depending on the cloud provider, this could include automatically programming routes into the underlying cloud network so it knows natively how to route pod traffic.
-
-### Kubenet
-
-Kubenet is an extremely basic network plugin built into Kubernetes. It does not implement cross-node networking or network policy. It is typically used together with a cloud provider integration that sets up routes in the cloud provider network for communication between nodes, or in single node environments. Kubenet is not compatible with {{prodname}}.
-
-### Overlay networks
-
-An overlay network is a network that is layered on top of another network. In the context of Kubernetes, an overlay network can be used to handle pod-to-pod traffic between nodes on top of an underlying network that is not aware of pod IP addresses or which pods are running on which nodes. Overlay networks work by encapsulating network packets that an underlying network doesn’t know how to handle (for example using pod IP addresses) within an outer packet which the underlying network does know how to handle (for example node IP addresses). Two common network protocols used for encapsulation are VXLAN and IP-in-IP.
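-
-In {{prodname}}, encapsulation is chosen per IP pool. As a rough sketch (the pool name and CIDR are illustrative), a VXLAN overlay can be selected like this, with `ipipMode` working the same way for IP-in-IP:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: example-vxlan-pool # hypothetical pool name
-spec:
-  cidr: 10.244.0.0/16 # illustrative pod CIDR
-  vxlanMode: Always # encapsulate pod-to-pod traffic between all nodes
-  natOutgoing: true
-```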
-
-The main advantage of using an overlay network is that it reduces dependencies on the underlying network. For example, you can run a VXLAN overlay on top of almost any underlying network, without needing to integrate with or make any changes to the underlying network.
-
-The main disadvantages of using an overlay network are:
-
-- A slight performance impact. The process of encapsulating packets takes a small amount of CPU, and the extra bytes required in the packet to encode the encapsulation (VXLAN or IP-in-IP headers) reduce the maximum size of the inner packet that can be sent, which in turn can mean needing to send more packets for the same amount of total data.
-- The pod IP addresses are not routable outside of the cluster. More on this below!
-
-### Cross-subnet overlays
-
-In addition to standard VXLAN or IP-in-IP overlays, {{prodname}} also supports “cross-subnet” modes for VXLAN and IP-in-IP. In these modes, the underlying network acts as an L2 network within each subnet. Packets sent within a single subnet are not encapsulated, so you get the performance of a non-overlay network. Packets sent across subnets are encapsulated, like a normal overlay network, reducing dependencies on the underlying network (without the need to integrate with or make any changes to the underlying network).
-
-Just like with a standard overlay network, the underlying network is not aware of pod IP addresses and the pod IP addresses are not routable outside of the cluster.
-
-### Pod IP routability outside of the cluster
-
-An important distinguishing feature of different Kubernetes network implementations is whether or not pod IP addresses are routable outside of the cluster across the broader network.
-
-**Not routable**
-
-If the pod IP addresses are not routable outside of the cluster, then when a pod tries to establish a network connection to an IP address that is outside of the cluster, Kubernetes uses a technique called SNAT (Source Network Address Translation) to change the source IP address from the IP address of the pod to the IP address of the node hosting the pod. Any return packets on the connection get automatically mapped back to the pod IP address. So the pod is unaware that SNAT is happening, the destination for the connection sees the node as the source of the connection, and the underlying broader network never sees pod IP addresses.
-
-For connections in the opposite direction, where something outside of the cluster needs to connect to a pod, this can only be done via Kubernetes services or Kubernetes ingress. Nothing outside of the cluster can directly connect to a pod IP address, because the broader network doesn’t know how to route packets to pod IP addresses.
-
-**Routable**
-
-If the pod IP addresses are routable outside of the cluster, then pods can connect to the outside world without SNAT, and the outside world can connect directly to pods without going via a Kubernetes service or Kubernetes ingress.
-
-The advantages of pod IP addresses that are routable outside the cluster are:
-
-- Avoiding SNAT for outbound connections may be essential for integrating with existing broader security requirements. It can also simplify debugging and make operational logs easier to understand.
-- If you have specialized workloads that mean some pods need to be directly accessible without going via Kubernetes services or Kubernetes ingress, then routable pod IPs can be operationally simpler than the alternative of using host-networked pods.
-
-The main disadvantage of pod IP addresses that are routable outside the cluster is that the pod IPs must be unique across the broader network. For example, if you run multiple clusters, you will need to use a different IP address range (CIDR) for pods in each cluster. This in turn can lead to IP address range exhaustion challenges when running at scale, or if there are other significant existing enterprise demands on IP address space.
-
-**What determines routability?**
-
-If you are using an overlay network for your cluster, then pod IPs are not normally routable outside of the cluster.
-
-If you aren’t using an overlay network, then whether pod IPs are routable outside of the cluster depends on the combination of CNI plugins, cloud provider integrations, or (for on-prem) BGP peering with the physical network that is being used.
-
-### BGP
-
-BGP (Border Gateway Protocol) is a standards-based networking protocol for sharing routes across a network. It’s one of the fundamental building blocks of the internet, with exceptional scaling characteristics.
-
-{{prodname}} has built-in support for BGP. In an on-prem deployment, this allows {{prodname}} to peer with the physical network (typically to top-of-rack routers) to exchange routes, making a non-overlay network where pod IP addresses are routable across the broader network, just like any other workload attached to the network.
-
-## About {{prodname}} Networking
-
-{{prodname}}’s flexible modular architecture for networking includes the following.
-
-**{{prodname}} CNI network plugin**
-
-The {{prodname}} CNI network plugin connects pods to the host network namespace’s L3 routing using a pair of virtual Ethernet devices (veth pair). This L3 architecture avoids the unnecessary complexity and performance overheads of the additional L2 bridges that feature in many other Kubernetes networking solutions.
-
-**{{prodname}} CNI IPAM plugin**
-
-The {{prodname}} CNI IPAM plugin allocates IP addresses for pods out of one or more configurable IP address ranges, dynamically allocating small blocks of IPs per node as required. The result is more efficient IP address space usage compared to many other CNI IPAM plugins, including the host-local IPAM plugin, which is used in many networking solutions.
-
-**Overlay network modes**
-
-{{prodname}} can provide either VXLAN or IP-in-IP overlay networks, including cross-subnet modes.
-
-**Non-overlay network modes**
-
-{{prodname}} can provide non-overlay networks running on top of any underlying L2 network, or an L3 network that is either a public cloud network with appropriate cloud provider integration, or a BGP-capable network (typically an on-prem network with standard top-of-rack routers).
-
-**Network policy enforcement**
-
-{{prodname}}’s network policy enforcement engine implements the full range of Kubernetes Network Policy features, plus the extended features of {{prodname}} Network Policy. This works in conjunction with {{prodname}}’s built-in networking modes, or any other {{prodname}}-compatible network plugins and cloud provider integrations.
-
-## {{prodname}} compatible CNI plugins and cloud provider integrations
-
-In addition to the {{prodname}} CNI plugins and built-in networking modes, {{prodname}} is also compatible with a number of third-party CNI plugins and cloud provider integrations.
-
-**Amazon VPC CNI**
-
-The Amazon VPC CNI plugin allocates pod IPs from the underlying AWS VPC and uses AWS elastic network interfaces to provide VPC-native pod networking (pod IPs that are routable outside of the cluster). It is the default networking used in [Amazon EKS](https://aws.amazon.com/eks/), with Calico for network policy enforcement.
-
-**Azure CNI**
-
-The Azure CNI plugin allocates pod IPs from the underlying Azure VNET and configures the Azure virtual network to provide VNET-native pod networking (pod IPs that are routable outside of the cluster). It is the default networking used in [Microsoft AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/), with Calico for network policy enforcement.
-
-**Azure cloud provider**
-
-The Azure cloud provider integration can be used as an alternative to the Azure CNI plugin. It uses the host-local IPAM CNI plugin to allocate pod IPs, and programs the underlying Azure VNET subnet with corresponding routes. Pod IPs are only routable within the VNET subnet (which often means they are not routable outside of the cluster).
-
-**Google cloud provider**
-
-The Google cloud provider integration uses the host-local IPAM CNI plugin to allocate pod IPs, and programs Google cloud network Alias IP ranges to provide VPC-native pod networking on Google cloud (pod IPs that are routable outside of the cluster). It is the default for Google Kubernetes Engine (GKE), with Calico for network policy enforcement.
-
-**Host-local IPAM**
-
-The host-local CNI IPAM plugin is a commonly used IP address management CNI plugin, which allocates a fixed-size IP address range (CIDR) to each node, and then allocates pod IP addresses from within that range. The default address range size is 256 IP addresses (a /24), though two of those IP addresses are reserved for special purposes and not assigned to pods. The simplicity of the host-local CNI IPAM plugin makes it easy to understand, but it results in less efficient IP address space usage compared to the {{prodname}} CNI IPAM plugin.
-
-**Flannel**
-
-Flannel routes pod traffic using static per-node CIDRs obtained from the host-local IPAM CNI plugin. Flannel provides a number of networking backends, but is predominantly used with its VXLAN overlay backend. {{prodname}} CNI and {{prodname}} network policy can be combined with flannel and the host-local IPAM plugin to provide a VXLAN network with policy enforcement. This combination is sometimes referred to as “Canal”.
-
-:::note
-
-{{prodname}} now has built-in support for VXLAN, which we generally recommend for simplicity in preference to using the Calico+Flannel combination.
-
-:::
-
-## Networking Options
-
-### On-prem
-
-The most common network setup for {{prodname}} on-prem is non-overlay mode using [BGP to peer](configuring/bgp.mdx) with the physical network (typically top-of-rack routers) to make pod IPs routable outside of the cluster. (You can of course configure the rest of your on-prem network to limit the scope of pod IP routing outside of the cluster if desired.) This setup provides a rich range of advanced {{prodname}} features, including the ability to advertise Kubernetes service IPs (cluster IPs or external IPs), and the ability to control IP address management at the pod, namespace, or node level, to support a wide range of possibilities for integrating with existing enterprise network and security requirements.
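-
-As a concrete illustration, peering every node with a top-of-rack router can be expressed with a single `BGPPeer` resource. This is a minimal sketch; the peer address and AS number are placeholders for your own infrastructure:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: rack1-tor
-spec:
-  # The router's address and AS number; omit node/nodeSelector
-  # to peer all nodes with this router.
-  peerIP: 192.20.30.40
-  asNumber: 64567
-```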
-
-
-If peering BGP to the physical network is not an option, you can also run non-overlay mode if the cluster is within a single L2 network, with Calico just peering BGP between the nodes in the cluster. Even though this is not strictly an overlay network, the pod IPs are not routable outside of the cluster, because the broader network does not have routes for the pod IPs.
-
-
-
-Alternatively, you can run {{prodname}} in either VXLAN or IP-in-IP overlay mode, with cross-subnet overlay mode to optimize performance within each L2 subnet.
-
-_Recommended:_
-
-
-
-_Alternative:_
-
-
-
-### AWS
-
-If you would like pod IP addresses to be routable outside of the cluster, then you must use the Amazon VPC CNI plugin. This is the default networking mode for [EKS](https://aws.amazon.com/eks/), with Calico for network policy. Pod IP addresses are allocated from the underlying VPC, and the maximum number of pods per node is dependent on the [instance type](https://github.com/aws/amazon-vpc-cni-k8s#eni-allocation).
-
-
-
-If you prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VPC is problematic due to IP address range exhaustion challenges, or if the maximum number of pods supported per node by the Amazon VPC CNI plugin is not sufficient for your needs, we recommend using {{prodname}} networking in cross-subnet overlay mode. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network.
-
-
-
-You can learn more about Kubernetes networking on AWS, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on AWS](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-pod-networking-on-aws/).
-
-### Azure
-
-If you would like pod IP addresses to be routable outside of the cluster, then you must use the Azure CNI plugin. This is supported by [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/), with Calico for network policy. Pod IP addresses are allocated from the underlying VNET.
-
-
-
-If you want to use AKS but allocating pod IPs from the underlying VNET is problematic due to IP address range exhaustion challenges, you can use {{prodname}} in conjunction with the Azure cloud provider integration. This uses host-local IPAM to allocate a /24 per node, and programs routes within the cluster’s underlying VNET subnet for those /24s. Pod IPs are not routable outside of the cluster / VNET subnet, so the same pod IP address range (CIDR) can be used across multiple clusters if desired.
-
-:::note
-
-This is referred to as kubenet + Calico in some AKS docs, but it is actually Calico CNI with the Azure cloud provider, and does not use the kubenet plugin.
-
-:::
-
-
-
-If you aren’t using AKS, and prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VNET is problematic due to IP address range exhaustion challenges, we recommend using {{prodname}} networking in cross-subnet overlay mode. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network.
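-
-A minimal sketch of the cross-subnet overlay pool recommended above (the CIDR is a placeholder): traffic between nodes on the same subnet is sent unencapsulated, and only traffic that crosses a subnet boundary is wrapped in IP-in-IP:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: cross-subnet-pool
-spec:
-  cidr: 10.244.0.0/16
-  # Encapsulate only when the destination node is on a different subnet.
-  ipipMode: CrossSubnet
-  natOutgoing: true
-```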
-
-
-You can learn more about Kubernetes networking on Azure, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on Azure](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-azure/).
-
-### Google Cloud
-
-If you would like pod IP addresses to be routable outside of the cluster, then you must use the Google cloud provider integration in conjunction with the host-local IPAM CNI plugin. This is supported by [GKE](https://cloud.google.com/kubernetes-engine), with Calico for network policy. Pod IP addresses are allocated from the underlying VPC, and corresponding Alias IP addresses are automatically assigned to nodes.
-
-
-
-If you prefer to avoid dependencies on a specific cloud provider, or allocating pod IPs from the underlying VPC is problematic due to IP address range exhaustion challenges, we recommend using {{prodname}} networking in overlay mode. Because the Google cloud network is a pure L3 network, cross-subnet mode is not supported. Pod IPs will not be routable outside of the cluster, but you can scale the cluster up to the limits of Kubernetes with no dependencies on the underlying cloud network.
-
-_Recommended:_
-
-
-
-_Alternative:_
-
-
-
-You can learn more about Kubernetes networking on Google cloud, including how each of the above options works under the covers, in this short video: [Everything you need to know about Kubernetes networking on Google cloud](https://www.projectcalico.org/everything-you-need-to-know-about-kubernetes-networking-on-google-cloud/).
-
-### IBM Cloud
-
-If you are using IBM Cloud, then we recommend using [IKS](https://www.ibm.com/products/kubernetes-service/), which has Calico built in to provide a cross-subnet IP-in-IP overlay. In addition to providing network policy for pods, IKS also uses Calico network policies to [secure the host nodes](https://cloud.ibm.com/docs/containers?topic=containers-network_policies#default_policy) within the cluster.
-
-
-
-### Anywhere
-
-The above list of environments is obviously not exhaustive. Understanding the concepts and explanations in this guide has hopefully helped you figure out what is right for your environment. If you still aren't sure, you can ask for advice through the Calico Users Slack or Discourse forum. And remember, you can run Calico in VXLAN overlay mode in almost any environment if you want to get started without worrying too deeply about the different options.
-
-
-
-## Additional resources
-
-- [Video playlist: Everything you need to know about Kubernetes networking](https://www.youtube.com/playlist?list=PLoWxE_5hnZUZMWrEON3wxMBoIZvweGeiq)
-- [Configure BGP peering](configuring/bgp.mdx)
-- [Configure overlay networking](configuring/vxlan-ipip.mdx)
-- [Advertise Kubernetes service IP addresses](configuring/advertise-service-ips.mdx)
-- [Customize IP address management](ipam/index.mdx)
-- [Interoperate with legacy firewalls using IP ranges](ipam/legacy-firewalls.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/index.mdx b/calico_versioned_docs/version-3.25/networking/index.mdx
deleted file mode 100644
index 6f9d67e775..0000000000
--- a/calico_versioned_docs/version-3.25/networking/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Calico's flexible networking options reduce the barriers to adopting a CaaS platform solution. Determine the best networking option for your implementation.
-hide_table_of_contents: true
----
-
-# Networking
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx b/calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx
deleted file mode 100644
index d119e2fc14..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/add-floating-ip.mdx
+++ /dev/null
@@ -1,117 +0,0 @@
----
-description: Configure one or more floating IPs to use as additional IP addresses for reaching a Kubernetes pod.
----
-
-# Add a floating IP to a pod
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure one or more floating IPs that can be used as additional IP addresses for reaching a Kubernetes pod.
-
-## Value
-
-Like Kubernetes Services, a floating IP provides a stable IP address to reach some network service that might be backed by different pods at different times. The primary advantage over Kubernetes services is that floating IPs work with all protocols, not just TCP, UDP, and SCTP. Unlike Kubernetes services, a floating IP fronts a single pod at a time and cannot be used for load balancing.
-
-## Concepts
-
-A **floating IP** is an additional IP address assigned to a workload endpoint. These IPs “float” in the sense that they can be moved around the cluster and front different workload endpoints at different times. The workload itself is generally unaware of the floating IP; the host uses network address translation (NAT) on incoming traffic to change the floating IP to the workload’s real IP before delivering packets to the workload.
-
-A Kubernetes Service assigns a **cluster IP** (and may also assign a nodePort and/or an external load balancer IP) that allows other endpoints on the network to access a set of pods, using network address translation. In many circumstances, a Kubernetes Service can handle similar use cases as a floating IP, and is generally recommended for Kubernetes users because it is a native Kubernetes concept. One thing you cannot do with Kubernetes Services is use protocols other than UDP, TCP, and SCTP (use of such protocols is fairly rare).
-
-## Before you begin...
-
-The features in this how-to guide require:
-
-- {{prodname}} CNI plugin
-
-To verify, ssh to one of the Kubernetes nodes and look at the CNI plugin configuration, usually located at `/etc/cni/net.d/`. If you see the file `10-calico.conflist`, you are using the {{prodname}} CNI plugin.
-
-## How to
-
-- [Enable floating IPs](#enable-floating-ips)
-- [Configure a pod to use a floating IP](#configure-a-pod-to-use-a-floating-ip)
-
-### Enable floating IPs
-
-
-
-Floating IPs for Kubernetes pods are not currently supported for operator-managed Calico clusters.
-
-
-
-By default, floating IPs are disabled. To enable floating IPs, follow these steps.
-
-Modify the calico-config ConfigMap in the kube-system namespace. In the `cni_network_config` section, add the following stanza to the “calico” plugin config section.
-
-```
-  "feature_control": {
-      "floating_ips": true
-  }
-```
-
-For example, your `cni_network_config` will look similar to the following after the update.
-
-```
-  cni_network_config: |-
-    {
-      "name": "k8s-pod-network",
-      "cniVersion": "0.3.0",
-      "plugins": [
-        {
-          "type": "calico",
-          "log_level": "info",
-          "datastore_type": "kubernetes",
-          "nodename": "__KUBERNETES_NODE_NAME__",
-          "mtu": __CNI_MTU__,
-          "ipam": {
-            "type": "calico-ipam"
-          },
-          "policy": {
-            "type": "k8s"
-          },
-          "kubernetes": {
-            "kubeconfig": "__KUBECONFIG_FILEPATH__"
-          },
-          "feature_control": {
-            "floating_ips": true
-          }
-        },
-        {
-          "type": "portmap",
-          "snat": true,
-          "capabilities": {"portMappings": true}
-        }
-      ]
-    }
-```
-
-
-
-### Configure a pod to use a floating IP
-
-
-
-Floating IPs for Kubernetes pods are not currently supported for operator-managed Calico clusters.
-
-
-
-Annotate the pod with the key `cni.projectcalico.org/floatingIPs` and the value set to a list of IP addresses enclosed in square brackets. For correct advertisement to the rest of the cluster, all floating IPs must be within the range of a configured [IP pool](../../reference/resources/ippool.mdx).
-
-For example:
-
-```
-"cni.projectcalico.org/floatingIPs": "[\"10.0.0.1\"]"
-```
-
-Note the use of the escaped `\"` for the inner double quotes around the addresses.
-
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx b/calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx
deleted file mode 100644
index 9d6d498eee..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/assign-ip-addresses-topology.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
----
-description: Configure Calico to use specific IP pools for different topologies including zone, rack, or region.
----
-
-# Assign IP addresses based on topology
-
-## Big picture
-
-Assign blocks of IP addresses from an IP pool for different topological areas.
-
-## Value
-
-If you have workloads in different regions, zones, or racks, you may want them to get IP addresses from the same IP pool. This strategy is useful for reducing the number of routes that are required in the network, or for meeting requirements imposed by an external firewall device or policy. {{prodname}} makes it easy to do this using an IP pool resource with node labels and node selectors.
-
-## Concepts
-
-### IP address assignment
-
-Topology-based IP address assignment requires addresses to be per-host (node).
-As such, Kubernetes annotations cannot be used, because annotations are only per-namespace and per-pod. And although you can configure IP addresses for nodes in the CNI configuration, you would be making changes within the host’s file system. The best option is node-selection IP address assignment using IP pools.
-
-### Node-selection IP address management
-
-Node selection-based IP address assignment is exactly what it sounds like: node labels are set, and Calico uses node selectors to decide which IP pools to use when assigning IP addresses to the node.
-
-### Best practice
-
-Nodes only assign workload addresses from IP pools that select them. To avoid a workload failing to start because it cannot get an IP address, it is important to ensure that all nodes are selected by at least one IP pool.
-
-## How to
-
-### Create an IP pool, specific nodes
-
-In the following example, we create an IP pool that only allocates IP addresses for nodes with the label **zone=west**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: zone-west-ippool
-spec:
-  cidr: 192.168.0.0/24
-  ipipMode: Always
-  natOutgoing: true
-  nodeSelector: zone == "west"
-```
-
-Then, we label a node with zone=west. For example:
-
-```bash
-kubectl label nodes kube-node-0 zone=west
-```
-
-## Tutorial
-
-In this tutorial, we create a cluster with four nodes across two racks (two nodes per rack).
-
-```
-       -------------------
-       |      router     |
-       -------------------
-       |                 |
----------------   ---------------
-|   rack-0    |   |   rack-1    |
----------------   ---------------
-| kube-node-0 |   | kube-node-2 |
-- - - - - - - -   - - - - - - - -
-| kube-node-1 |   | kube-node-3 |
-- - - - - - - -   - - - - - - - -
-```
-
-Using the pod IP range `192.168.0.0/16`, we target the following setup: reserve
-the `192.168.0.0/24` and `192.168.1.0/24` pools for `rack-0` and `rack-1`, respectively. Let's
-get started.
-
-If you install {{prodname}} without setting the default IP pool to match,
-running `calicoctl get ippool -o wide` shows that {{prodname}} created its
-default IP pool of `192.168.0.0/16`:
-
-```
-NAME                  CIDR             NAT    IPIPMODE   DISABLED   SELECTOR
-default-ipv4-ippool   192.168.0.0/16   true   Always     false      all()
-```
-
-1. Delete the default IP pool.
-
-   Since the `default-ipv4-ippool` IP pool resource already exists and accounts
-   for the entire `/16` block, we will have to delete this first:
-
-   ```bash
-   calicoctl delete ippools default-ipv4-ippool
-   ```
-
-2. Label the nodes.
-
-   To assign IP pools to specific nodes, these nodes must be labelled
-   using [kubectl label](https://kubernetes.io/docs/tasks/configure-pod-container/assign-pods-nodes/#add-a-label-to-a-node).
-
-   ```bash
-   kubectl label nodes kube-node-0 rack=0
-   kubectl label nodes kube-node-1 rack=0
-   kubectl label nodes kube-node-2 rack=1
-   kubectl label nodes kube-node-3 rack=1
-   ```
-
-3. Create an IP pool for each rack.
-
-   ```bash
-   calicoctl create -f -<<EOF
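-   # Sketch: one pool per rack, matching the setup above (illustrative values).
-   apiVersion: projectcalico.org/v3
-   kind: IPPool
-   metadata:
-     name: rack-0-ippool
-   spec:
-     cidr: 192.168.0.0/24
-     ipipMode: Always
-     natOutgoing: true
-     nodeSelector: rack == "0"
-   EOF
-   ```
-
-   Create a second pool, `rack-1-ippool`, in the same way, with `cidr: 192.168.1.0/24` and `nodeSelector: rack == "1"` (the pool definitions here are a sketch based on the setup described above). After both pools exist, deploying a small nginx deployment across the cluster should show each pod's address drawn from its node's rack-local pool, along the lines of the output below.
-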
- -## Value - -Changing IP pool block size after installation requires ordered steps to minimize pod connectivity disruption. - -## Concepts - -### About IP pools - -By default, {{prodname}} uses an IPAM block size of 64 addresses – /26 for IPv4, and /122 for IPv6. However, the block size can be changed depending on the IP pool address family. - -- IPv4: 20-32, inclusive -- IPv6: 116-128, inclusive - -You can have **only one default IP pool for per protocol** in your installation manifest. In this example, there is one IP pool for IPv4 (/26), and one IP pool for IPv6 (/122). - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - # Configures Calico networking. - calicoNetwork: - # Note: The ipPools section cannot be modified post-install. - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() - - blockSize: 122 - cidr: 2001::00/64 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() -``` - -However, the following is invalid because it has two IP pools for IPv4. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - # Configures Calico networking. - calicoNetwork: - # Note: The ipPools section cannot be modified post-install. - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() - - blockSize: 31 - cidr: 10.48.8.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() -``` - -### Expand or shrink IP pool block sizes - -By default, the {{prodname}} IPAM block size for an IP pool is /26. To expand from the default size /26, lower the `blockSize` (for example, /24). To shrink the `blockSize` from the default /26, raise the number (for example, /28). - -### Best practice: change IP pool block size before installation - -Because the `blockSize` field cannot be edited directly after {{prodname}} installation, it is best to change the IP pool block size before installation to minimize disruptions to pod connectivity. - -## Before you begin... - -**Required** - -Verify that you are using {{prodname}} IPAM. - - - -## How to - -:::note - -Follow the steps to minimize pod connectivity disruption. Pods may lose connectivity when they are redeployed, and may lose external connectivity while in the temporary pool. Also, when pods are deleted, applications may be temporarily unavailable (depending on the type of application). Plan your changes accordingly. - -::: - -The high-level steps to follow are: - -1. [Create a temporary IP pool](#create-a-temporary-ip-pool) - **Note**: The temporary IP pool must not overlap with the existing one. -1. [Disable the existing IP pool](#disable-the-existing-ip-pool) - **Note**: When you disable an IP pool, only new IP address allocations are prevented; networking of existing pods are not affected. -1. [Delete pods from the existing IP pool](#delete-pods-from-the-existing-ip-pool) - This includes any new pods that may have been created with the existing IP pool prior to disabling the pool. Verify that new pods get an address from the temporary IP pool. -1. [Delete the existing IP pool](#delete-the-existing-ip-pool) -1. [Create a new IP pool with the desired block size](#create-a-new-ip-pool-with-the-desired-block-size) -1. [Disable the temporary IP pool](#disable-the-temporary-ip-pool) -1. [Delete pods from the temporary IP pool](#delete-pods-from-the-temporary-ip-pool) -1. 
-
-## Tutorial
-
-In the following steps, our Kubernetes cluster has a default block size of /26. We want to shrink the block size to /28 to use the pool more efficiently.
-
-### Create a temporary IP pool
-
-We add a new IPPool with the CIDR range, 10.0.0.0/16.
-
-Create a file named `temporary-pool.yaml`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: temporary-pool
-spec:
-  cidr: 10.0.0.0/16
-  ipipMode: Always
-  natOutgoing: true
-```
-
-Apply the changes.
-
-```bash
-calicoctl apply -f temporary-pool.yaml
-```
-
-Let’s verify the temporary IP pool.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME                  CIDR             NAT    IPIPMODE   DISABLED
-default-ipv4-ippool   192.168.0.0/16   true   Always     false
-temporary-pool        10.0.0.0/16      true   Always     false
-```
-
-### Disable the existing IP pool
-
-Disable allocations in the default pool.
-
-```bash
-calicoctl patch ippool default-ipv4-ippool -p '{"spec": {"disabled": true}}'
-```
-
-Verify the changes.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME                  CIDR             NAT    IPIPMODE   DISABLED
-default-ipv4-ippool   192.168.0.0/16   true   Always     true
-temporary-pool        10.0.0.0/16      true   Always     false
-```
-
-### Delete pods from the existing IP pool
-
-In our example, **coredns** is our only pod; with multiple pods, you would trigger a deletion for all pods in the cluster.
-
-```bash
-kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp
-```
-
-Alternatively, restart all pods with just one command.
-
-:::caution
-
-The following command is disruptive and may take several minutes depending on the number of pods deployed.
-
-:::
-
-```bash
-kubectl delete pod -A --all
-```
-
-### Delete the existing IP pool
-
-Now that you’ve verified that pods are getting IPs from the new range, you can safely delete the existing pool.
-
-```bash
-calicoctl delete ippool default-ipv4-ippool
-```
-
-### Create a new IP pool with the desired block size
-
-In this step, we update the IP pool with the new block size (/28). Save the following as `pool.yaml`.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: default-ipv4-ippool
-spec:
-  blockSize: 28
-  cidr: 192.168.0.0/16
-  ipipMode: Always
-  natOutgoing: true
-```
-
-Apply the changes.
-
-```bash
-calicoctl apply -f pool.yaml
-```
-
-### Disable the temporary IP pool
-
-```bash
-calicoctl patch ippool temporary-pool -p '{"spec": {"disabled": true}}'
-```
-
-### Delete pods from the temporary IP pool
-
-In our example, **coredns** is our only pod; with multiple pods, you would trigger a deletion for all pods in the cluster.
-
-```bash
-kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp
-```
-
-Alternatively, restart all pods with just one command.
-
-:::caution
-
-The following command is disruptive and may take several minutes depending on the number of pods deployed.
-
-:::
-
-```bash
-kubectl delete pod -A --all
-```
-
-Validate that your pods and block size are correct by running the following commands:
-
-```bash
-kubectl get pods --all-namespaces -o wide
-calicoctl ipam show --show-blocks
-```
-
-### Delete the temporary IP pool
-
-Clean up the IP pools by deleting the temporary IP pool.
-
-```bash
-calicoctl delete pool temporary-pool
-```
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx b/calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx
deleted file mode 100644
index 08f9be302a..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/get-started-ip-addresses.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-description: Configure Calico to use Calico IPAM or host-local IPAM, and when to use one or the other.
----
-
-# Get started with IP address management
-
-## Big picture
-
-Understand how IP address management (IPAM) functions in a Kubernetes cluster using Calico.
-
-## Value
-
-Different IPAM techniques provide different feature sets. Calico’s IPAM provides additional IP allocation efficiency and flexibility compared to other address management approaches.
-
-## Concepts
-
-### IPAM in Kubernetes
-
-Kubernetes uses IPAM plugins to allocate and manage IP addresses assigned to pods. Different IPAM plugins provide different feature sets. Calico provides its own IPAM plugin called **calico-ipam**, which is designed to work well with Calico and includes a number of features.
-
-### Calico IPAM
-
-The **calico-ipam** plugin uses Calico’s IP pool resource to control how IP addresses are allocated to pods within the cluster. This is the default plugin used by most Calico installations.
-
-By default, Calico uses a single IP pool for the entire Kubernetes pod CIDR, but you can divide the pod CIDR into several pools. You can assign separate IP pools to particular selections of **nodes**, or to teams, users, or applications within a cluster using **namespaces**.
-
-You can control which pools Calico uses for each pod using:
-
-- node selectors
-- an annotation on the pod’s namespace, or
-- an annotation on the pod
-
-Calico also supports the **host-local** IPAM plugin. However, when using the host-local IPAM plugin, some Calico features are not available.
-
-### Calico IPAM blocks
-
-In Calico IPAM, IP pools are subdivided into blocks -- smaller chunks that are associated with a particular node in the cluster. Each node in the cluster can have one or more blocks associated with it. Calico will automatically create and destroy blocks as needed as the number of nodes and pods in the cluster grows or shrinks.
-
-Blocks allow Calico to efficiently aggregate addresses assigned to pods on the same node, reducing the size of the routing table. By default, Calico tries to allocate IP addresses from within an associated block, creating a new block if necessary. Calico can also assign addresses to pods on a node that are not within a block associated with that node. This allows for IP allocations that are independent of the node on which a pod is launched.
-
-By default, Calico creates blocks with room for 64 addresses (a /26), but you can control the block size for each IP pool.
-
-### Host-local IPAM
-
-The host-local plugin is a simple IP address management plugin. It uses predetermined CIDRs statically allocated to each node to choose addresses for pods. Once set, the CIDR for a node cannot be modified. Pods can be assigned addresses only from within the CIDR allocated to the node.
-
-Calico can use the host-local IPAM plugin, reading the **Node.Spec.PodCIDR** field in the Kubernetes API to determine the CIDR to use for each node. However, per-node, per-pod, and per-namespace IP allocation features are not available using the host-local plugin.
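-
-For instance, delegating IPAM to host-local in the {{prodname}} CNI configuration looks roughly like the following fragment. This is a sketch: the surrounding plugin settings are omitted, and `usePodCidr` tells the plugin to allocate from the node's **Node.Spec.PodCIDR**.
-
-```
-"ipam": {
-    "type": "host-local",
-    "subnet": "usePodCidr"
-},
-```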
The host-local IPAM plugin is primarily used by other methods of routing pod traffic from one host to another. For example, it is used when installing Calico for policy enforcement with flannel networking, as well as when using Calico in Google Kubernetes Engine (GKE).
-
-## How to
-
-### Install Calico with calico-ipam
-
-Follow one of the [getting started guides](../../getting-started/index.mdx) to install Calico.
-
-### Install Calico with host-local IPAM
-
-Follow one of the [getting started guides](../../getting-started/index.mdx) to install Calico with flannel networking, or on GKE.
-
-Or, see the [reference documentation on host-local IPAM](../../reference/configure-cni-plugins.mdx#using-host-local-ipam).
-
-## Tutorial
-
-For a blog/tutorial on IP pools, see [Calico IPAM: Explained and Enhanced](https://www.tigera.io/blog/calico-ipam-explained-and-enhanced/).
-
-## Additional resources
-
-- [IP Pool](../../reference/resources/ippool.mdx)
-
-There are several other ways to leverage Calico IPAM, including:
-
-- [Assign addresses based on topology](assign-ip-addresses-topology.mdx)
-- [Use a specific address for a pod](use-specific-ip.mdx)
-- [Migrate from one IP pool to another](migrate-pools.mdx)
-- [Interoperate with legacy firewalls using IP ranges](legacy-firewalls.mdx)
-- [View IP address utilization](../../reference/calicoctl/ipam/show.mdx)
-- [Change IP address block size](../../reference/resources/ippool.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/index.mdx b/calico_versioned_docs/version-3.25/networking/ipam/index.mdx
deleted file mode 100644
index 834d48a7fd..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Calico IPAM is flexible and efficient. Learn how to interoperate with legacy firewalls using IP address ranges, advertise Kubernetes service IPs, and more.
-hide_table_of_contents: true
----
-
-# IP address management
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx b/calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx
deleted file mode 100644
index 8d5df010f7..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/ip-autodetection.mdx
+++ /dev/null
@@ -1,308 +0,0 @@
----
-description: Calico IP autodetection ensures the correct IP address is used for routing. Learn how to customize it.
----
-
-# Configure IP autodetection
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure IP autodetection for {{prodname}} nodes to ensure the correct IP address is used for routing.
-
-## Value
-
-When you install {{prodname}} on a node, an IP address and subnet are automatically detected. {{prodname}} provides several ways to configure IP/subnet autodetection, and supports configuring specific IPs for:
-
-- Hosts with multiple external interfaces
-- Host interfaces with multiple IP addresses
-- [Changes to cross subnet packet encapsulation](../configuring/vxlan-ipip.mdx)
-- Changes to host IP address
-
-## Concepts
-
-### Autodetecting node IP address and subnet
-
-For internode routing, each {{prodname}} node must be configured with an IPv4 address and/or an IPv6 address. When you install {{prodname}} on a node, a node resource is automatically created using routing information that is detected from the host.
For some deployments, you may want to update autodetection to ensure nodes get the correct IP address.
-
-**Sample default node resource after installation**
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: Node
-metadata:
-  name: node-hostname
-spec:
-  bgp:
-    asNumber: 64512
-    ipv4Address: 10.244.0.1/24
-    ipv6Address: 2000:db8:85a3::8a2e:370:7335/120
-    ipv4IPIPTunnelAddr: 192.168.0.1
-```
-
-### Autodetection methods
-
-By default, {{prodname}} uses the **first-found** method: the first valid IP address on the first interface (excluding local interfaces such as the docker bridge). However, you can change the default method to any of the following:
-
-- Address assigned to the Kubernetes node (**kubernetes-internal-ip**)
-- Address used by the node to reach a particular IP or domain (**can-reach**)
-- Regex to include matching interfaces (**interface**)
-- Regex to exclude matching interfaces (**skip-interface**)
-- A list of IP ranges in CIDR format to determine valid IP addresses on the node to choose from (**cidrs**)
-
-For more details on autodetection methods, see the [node configuration](../../reference/configure-calico-node.mdx) reference.
-
-## How to
-
-- [Change the autodetection method](#change-the-autodetection-method)
-- [Manually configure IP address and subnet for a node](#manually-configure-ip-address-and-subnet-for-a-node)
-
-### Change the autodetection method
-
-
-
-As noted previously, the default autodetection method is **first valid interface found** (first-found). To use a different autodetection method, edit the default [Installation](../../reference/installation/api.mdx#operator.tigera.io/v1.Installation) custom resource, specifying the method. Below are examples of the supported autodetection methods:
-
-:::note
-
-To configure the default autodetection method for IPv6 for any of the below methods, use the field `nodeAddressAutodetectionV6`.
-
-:::
-
-- **Kubernetes Node IP**
-
-  {{prodname}} will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field.
-
-  ```yaml
-  kind: Installation
-  apiVersion: operator.tigera.io/v1
-  metadata:
-    name: default
-  spec:
-    calicoNetwork:
-      nodeAddressAutodetectionV4:
-        kubernetes: NodeInternalIP
-  ```
-
-- **Source address used to reach an IP or domain name**
-
-  {{prodname}} will choose the IP address that is used to reach the given "can reach" IP address or domain. For example:
-
-  ```yaml
-  kind: Installation
-  apiVersion: operator.tigera.io/v1
-  metadata:
-    name: default
-  spec:
-    calicoNetwork:
-      nodeAddressAutodetectionV4:
-        canReach: 8.8.8.8
-  ```
-
-- **Including matching interfaces**
-
-  {{prodname}} will choose an address on each node from an interface that matches the given [regex](https://pkg.go.dev/regexp).
-  For example:
-
-  ```yaml
-  kind: Installation
-  apiVersion: operator.tigera.io/v1
-  metadata:
-    name: default
-  spec:
-    calicoNetwork:
-      nodeAddressAutodetectionV4:
-        interface: eth.*
-  ```
-
-- **Excluding matching interfaces**
-
-  {{prodname}} will choose an address on each node from an interface that does not match the given [regex](https://pkg.go.dev/regexp).
-  For example:
-
-  ```yaml
-  kind: Installation
-  apiVersion: operator.tigera.io/v1
-  metadata:
-    name: default
-  spec:
-    calicoNetwork:
-      nodeAddressAutodetectionV4:
-        skipInterface: eth.*
-  ```
-
-- **Including CIDRs**
-
-  {{prodname}} will select any IP address from the node that falls within the given CIDRs.
For example:
-
-  ```yaml
-  kind: Installation
-  apiVersion: operator.tigera.io/v1
-  metadata:
-    name: default
-  spec:
-    calicoNetwork:
-      nodeAddressAutodetectionV4:
-        cidrs:
-          - "192.168.200.0/24"
-  ```
-
-
-
-As noted previously, the default autodetection method is **first valid interface found** (first-found). To use a different autodetection method, use the following `kubectl set env` command, specifying the method:
-
-- **IPv4**
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=<method>
-  ```
-
-- **IPv6**
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP6_AUTODETECTION_METHOD=<method>
-  ```
-
-Where autodetection methods are based on:
-
-- **Kubernetes Node IP**
-
-  {{prodname}} will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field.
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=kubernetes-internal-ip
-  ```
-
-- **Source address used to reach an IP or domain name**
-
-  {{prodname}} will choose the IP address that is used to reach the given "can reach" IP address or domain. For example:
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=can-reach=www.google.com
-  ```
-
-- **Including matching interfaces**
-
-  {{prodname}} will choose an address on each node from an interface that matches the given [regex](https://pkg.go.dev/regexp).
-  For example:
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=interface=eth.*
-  ```
-
-- **Excluding matching interfaces**
-
-  {{prodname}} will choose an address on each node from an interface that does not match the given [regex](https://pkg.go.dev/regexp).
-  For example:
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=skip-interface=eth.*
-  ```
-
-- **Including CIDRs**
-
-  {{prodname}} will select any IP address from the node that falls within the given CIDRs. For example:
-
-  ```bash
-  kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=cidr=192.168.200.0/24,172.15.0.0/24
-  ```
-
-
-
-### Manually configure IP address and subnet for a node
-
-In the following scenarios, you may want to configure a specific IP and subnet:
-
-- Hosts with multiple external interfaces
-- Host interfaces with multiple IP addresses
-- Changes to cross subnet packet encapsulation
-- Changes to host IP address
-
-
-
-You can configure a specific IP address and subnet for a node by disabling IP autodetection and then updating the [Node resource](../../reference/resources/node.mdx).
-
-#### Disable autodetection
-
-To disable the autodetection method, update the appropriate `NodeAddressAutodetection` field in the Installation resource:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
-  name: default
-spec:
-  calicoNetwork:
-    nodeAddressAutodetectionV4: {}
-    nodeAddressAutodetectionV6: {}
-```
-
-#### Configure IP and subnet using node resource
-
-You can configure the IP address and subnet on a Node resource.
-
-Use `calicoctl patch` to update the current node configuration. For example:
-
-```bash
-calicoctl patch node kind-control-plane \
-  --patch='{"spec":{"bgp": {"ipv4Address": "10.0.2.10/24", "ipv6Address": "fd80:24e2:f998:72d6::/120"}}}'
-```
-
-
-
-You can configure a specific IP address and subnet for a node using environment variables or by updating the [Node resource](../../reference/resources/node.mdx).
Because you can configure the IP address and subnet using either environment variables or the Node resource, the following table describes how the values are synchronized.
-
-| **If this environment variable...** | **Is...**                                                | **Then...**                                                                                                                                 |
-| ----------------------------------- | -------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
-| IP/IP6                              | Explicitly set                                            | The specified values are used, and the Node resource is updated.                                                                               |
-|                                     | Set to autodetect                                         | The requested method is used (first-found, can-reach, interface, skip-interface, kubernetes-internal-ip), and the Node resource is updated.    |
-|                                     | Not set, but the Node resource has IP/IP6 values          | The Node resource value is used.                                                                                                               |
-| IP                                  | Not set, and there is no IP value in the Node resource    | Autodetects an IPv4 address and subnet, and updates the Node resource.                                                                         |
-| IP6                                 | Not set, and there is no IP6 value in the Node resource   | No IPv6 routing is performed on the node.                                                                                                      |
-
-#### Configure IP and subnet using environment variables
-
-To configure IP and subnet values using environment variables, use a `kubectl set env` command. For example:
-
-```bash
-kubectl set env daemonset/calico-node -n kube-system IP=10.0.2.10/24 IP6=fd80:24e2:f998:72d6::/120
-```
-
-:::note
-
-If the subnet is omitted, the defaults are /32 (IPv4) and /128 (IPv6). We recommend that you include the subnet information for clarity when specifying IP addresses.
-
-:::
-
-#### Configure IP and subnet using node resource
-
-You can also configure the IP address and subnet on a Node resource.
-
-:::note
-
-When configuring the IP address on a Node resource, you may want to disable IP address options or environment variables on the node. IP options on the container take precedence, and will overwrite the values you configure on the node resource.
-
-:::
-
-Use `calicoctl patch` to update the current node configuration. For example:
-
-```bash
-calicoctl patch node kind-control-plane \
-  --patch='{"spec":{"bgp": {"ipv4Address": "10.0.2.10/24", "ipv6Address": "fd80:24e2:f998:72d6::/120"}}}'
-```
-
-
-
-## Additional resources
-
-- For details on autodetection methods, see the [node configuration](../../reference/configure-calico-node.mdx) reference.
-- For calicoctl environment variables, see [Configuring {{nodecontainer}}](../../reference/configure-calico-node.mdx)
-- [Node resource](../../reference/resources/node.mdx)
-- [Reference documentation for calicoctl patch](../../reference/calicoctl/patch.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx b/calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx
deleted file mode 100644
index 4b0a6c0556..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/ipv6-control-plane.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-description: Configure the Kubernetes control plane to operate over IPv6 for dual stack or IPv6 only.
----
-
-# Configure Kubernetes control plane to operate over IPv6
-
-## Big picture
-
-If you have IPv6 connectivity between your nodes and workloads, you may also want to configure the Kubernetes control plane to operate over IPv6 instead of IPv4.
-
-## How to
-
-To configure Kubernetes components for IPv6 only, set the following flags.
-
-| **Component**               | **Flag**                                      | **Value/Content**                                                                                                                |
-| --------------------------- | --------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------- |
-| **kube-apiserver**          | `--bind-address` or `--insecure-bind-address` | Set to the appropriate IPv6 address or `::` for all IPv6 addresses on the host.                                                     |
-|                             | `--advertise-address`                         | Set to the IPv6 address that nodes should use to access the `kube-apiserver`.                                                       |
-| **kube-controller-manager** | `--master`                                    | Set to the IPv6 address where the `kube-apiserver` can be accessed.                                                                 |
-| **kube-scheduler**          | `--master`                                    | Set to the IPv6 address where the `kube-apiserver` can be accessed.                                                                 |
-| **kubelet**                 | `--address`                                   | Set to the appropriate IPv6 address or `::` for all IPv6 addresses.                                                                 |
-|                             | `--cluster-dns`                               | Set to the IPv6 address that will be used for the service DNS; this must be in the range used for `--service-cluster-ip-range`.     |
-|                             | `--node-ip`                                   | Set to the IPv6 address of the node.                                                                                                |
-| **kube-proxy**              | `--bind-address`                              | Set to the appropriate IPv6 address or `::` for all IPv6 addresses on the host.                                                     |
-|                             | `--master`                                    | Set to the IPv6 address where the `kube-apiserver` can be accessed.                                                                 |
-
-For dual stack settings, see [Enable IPv4/IPv6 dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#prerequisites).
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx b/calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx
deleted file mode 100644
index 7311ce7b96..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/ipv6.mdx
+++ /dev/null
@@ -1,233 +0,0 @@
----
-description: Configure dual stack or IPv6 only for workloads.
----
-
-# Configure dual stack or IPv6 only
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Configure {{prodname}} IP address allocation to use dual stack or IPv6 only for workload communications.
-
-## Value
-
-Workload communication over IPv6, alongside or instead of IPv4, is increasingly desirable. {{prodname}} supports:
-
-- **IPv4 only** (default)
-
-  Each workload gets an IPv4 address, and can communicate over IPv4.
-
-- **Dual stack**
-
-  Each workload gets an IPv4 and an IPv6 address, and can communicate over IPv4 or IPv6.
-
-- **IPv6 only**
-
-  Each workload gets an IPv6 address, and can communicate over IPv6.
-
-## Before you begin
-
-**{{prodname}} requirements**
-
-- {{prodname}} IPAM
-
-**Kubernetes version requirements**
-
-- For dual stack, 1.16 and later
-- For one IP stack at a time (IPv4 or IPv6), any Kubernetes version
-
-**Kubernetes IPv6 host requirements**
-
-- An IPv6 address that is reachable from the other hosts
-- The sysctl setting, `net.ipv6.conf.all.forwarding`, is set to `1`.
-  This ensures that both Kubernetes service traffic and {{prodname}} traffic are forwarded appropriately.
-- A default IPv6 route
-
-**Kubernetes IPv4 host requirements**
-
-- An IPv4 address that is reachable from the other hosts
-- The sysctl setting, `net.ipv4.conf.all.forwarding`, is set to `1`.
-  This ensures that both Kubernetes service traffic and {{prodname}} traffic are forwarded appropriately.
-- A default IPv4 route
-
-## How to
-
-:::note
-
-The following tasks are only for new clusters.
- -::: - -- [Enable IPv6 only](#enable-ipv6-only) -- [Enable dual stack](#enable-dual-stack) - -### Enable IPv6 only - - - - -To configure an IPv6-only cluster using the operator, edit your default Installation at install time to include a single IPv6 pool, and no IPv4 pools. For example: - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - calicoNetwork: - # Note: The ipPools section cannot be modified post-install. - ipPools: - - blockSize: 122 - cidr: 2001::00/64 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() -``` - - - - -1. Set up a new Kubernetes cluster with an IPv6 pod CIDR and service IP range. - -1. Using the [{{prodname}} Kubernetes install guide](../../getting-started/kubernetes/self-managed-onprem/onpremises.mdx), download the correct {{prodname}} manifest for the cluster and datastore type. - -1. Edit the CNI config (calico-config ConfigMap in the manifest) to disable IPv4 assignments and enable IPv6 assignments. - - ``` - "ipam": { - "type": "calico-ipam", - "assign_ipv4": "false", - "assign_ipv6": "true" - }, - ``` - -1. Configure IPv6 support by adding the following variable settings to the environment for the `calico-node` container: - - | Variable name | Value | - | ------------------- | ------------ | - | `IP6` | `autodetect` | - | `FELIX_IPV6SUPPORT` | `true` | - - :::note - - If your IPv6 IP pools include private IP addresses, pods that are assigned private IP addresses cannot perform outbound NAT by default. - - To enable outbound NAT for these pods, add `CALICO_IPV6POOL_NAT_OUTGOING: true` to the environment of the `calico-node` container. - - ::: - -1. For clusters **not** provisioned with kubeadm (see note below), configure the default IPv6 IP pool by adding the following variable setting to the environment for the `calico-node` container: - - | Variable name | Value | - | ---------------------- | ------------------------------------------------------------------------------------------------------- | - | `CALICO_IPV6POOL_CIDR` | the same as the IPv6 range you configured as the cluster CIDR to kube-controller-manager and kube-proxy | - - :::note - - For clusters provisioned with kubeadm, {{prodname}} autodetects the IPv4 and IPv6 pod CIDRs and does not require configuration. - - ::: - -1. Apply the edited manifest with `kubectl apply -f`. - - New pods will get IPv6 addresses, and can communicate with each other and the outside world over IPv6. - -**(Optional) Update host to not look for IPv4 addresses** - -If you want your workloads to have IPv6 addresses only, because you do not have IPv4 addresses or connectivity -between your nodes, complete these additional steps to tell {{prodname}} not to look for any IPv4 addresses. - -1. Disable [IP autodetection of IPv4](ip-autodetection.mdx) by setting `IP` to `none`. -1. Calculate the {{prodname}} BGP router ID for IPv6 using either of the following methods. - - Set the environment variable `CALICO_ROUTER_ID=hash` on {{nodecontainer}}. - This configures {{prodname}} to calculate the router ID based on the hostname. - - Pass a unique value for `CALICO_ROUTER_ID` to each node individually. - - - - -### Enable dual stack - -1. Set up a new cluster following the Kubernetes [prerequisites](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#prerequisites) and [enablement steps](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#enable-ipv4-ipv6-dual-stack). 
- - - - -To configure dual-stack cluster using the operator, edit your default Installation at install time to include both an IPv4 and IPv6 pool. For example: - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - # Configures Calico networking. - calicoNetwork: - # Note: The ipPools section cannot be modified post-install. - ipPools: - - blockSize: 26 - cidr: 10.48.0.0/21 - encapsulation: IPIP - natOutgoing: Enabled - nodeSelector: all() - - blockSize: 122 - cidr: 2001::00/64 - encapsulation: None - natOutgoing: Enabled - nodeSelector: all() -``` - - - - -1. Using the [{{prodname}} Kubernetes install guide](../../getting-started/kubernetes/self-managed-onprem/onpremises.mdx), download the correct {{prodname}} manifest for the cluster and datastore type. - -1. Edit the CNI config (`calico-config` ConfigMap in the manifest), and enable IPv4 and IPv6 address allocation by setting both fields to true. - - ``` - "ipam": { - "type": "calico-ipam", - "assign_ipv4": "true", - "assign_ipv6": "true" - }, - ``` - -1. Configure IPv6 support by adding the following variable settings to the environment for the `calico-node` container: - - | Variable name | Value | - | ------------------- | ------------ | - | `IP6` | `autodetect` | - | `FELIX_IPV6SUPPORT` | `true` | - - :::note - - If your IPv6 IP pools include private IP addresses, pods that are assigned private IP addresses cannot perform outbound NAT by default. - - To enable outbound NAT for these pods, add `CALICO_IPV6POOL_NAT_OUTGOING: true` to the environment of the `calico-node` container. - - ::: - -1. For clusters **not** provisioned with kubeadm (see note below), configure the default IPv6 IP pool by adding the following variable setting to the environment for the `calico-node` container: - - | Variable name | Value | - | ---------------------- | ------------------------------------------------------------------------------------------------------------ | - | `CALICO_IPV6POOL_CIDR` | the same as the IPv6 range you configured as the IPv6 cluster CIDR to kube-controller-manager and kube-proxy | - - :::note - - For clusters provisioned with kubeadm, {{prodname}} autodetects the IPv4 and IPv6 pod CIDRs and does not require configuration. - - ::: - -1. Apply the edited manifest with `kubectl apply -f`. - - New pods will get both IPv4 and IPv6 addresses, and can communicate with each other and the outside world over IPv4 or IPv6. - - - - -## Additional resources - -- [Configure Kubernetes control plane to operate over IPv6](ipv6-control-plane.mdx) diff --git a/calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx b/calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx deleted file mode 100644 index ccd25c9dd7..0000000000 --- a/calico_versioned_docs/version-3.25/networking/ipam/legacy-firewalls.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: Restrict the IP address chosen for a pod to a specific range of IP addresses. ---- - -# Restrict a pod to use an IP address in a specific range - -import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx'; - -## Big picture - -Restrict the IP address chosen for a pod to a specific range of IP addresses. - -## Value - -When Kubernetes pods interact with external systems that make decisions based on IP ranges (for example legacy firewalls), it can be useful to define several IP ranges and explicitly assign pods to those ranges. 
Using {{prodname}} IP Address Management (IPAM), you can restrict a pod to use an address from within a specific range.
-
-## Concepts
-
-### Kubernetes pod CIDR
-
-The **Kubernetes pod CIDR** is the range of IPs Kubernetes expects pod IPs to be assigned from. It is defined for the entire cluster and is used by various Kubernetes components to determine whether an IP belongs to a pod. For example, kube-proxy treats traffic differently if that traffic is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly.
-
-### IP Pool
-
-**IP pools** are ranges of IP addresses from which {{prodname}} assigns pod IPs. By default, {{prodname}} creates an IP pool for the entire Kubernetes pod CIDR, but you can change this to break the pod CIDR up into several pools. You can control which pool {{prodname}} uses for each pod using node selectors, or annotations on the pod or the pod’s namespace.
-
-## Before you begin...
-
-The features in this How to guide require:
-
-- {{prodname}} IPAM
-
-<DetermineIpam />
-
-Additionally, cluster administrators must have [configured IP pools](../../reference/resources/ippool.mdx) to define the valid IP ranges to use for allocating pod IP addresses.
-
-## How to
-
-### Restrict a pod to use an IP address range
-
-Annotate the pod with key `cni.projectcalico.org/ipv4pools` and/or `cni.projectcalico.org/ipv6pools` and value set to a list of IP pool names, enclosed in brackets. For example:
-
-```
-cni.projectcalico.org/ipv4pools: '["pool-1", "pool-2"]'
-```
-
-Note the single quotes around the list, which preserve the inner double quotes around the pool names.
-
-### Restrict all pods within a namespace to use an IP address range
-
-Annotate the namespace with key `cni.projectcalico.org/ipv4pools` and/or `cni.projectcalico.org/ipv6pools` and value set to a list of IP pool names, enclosed in brackets. For example:
-
-```
-cni.projectcalico.org/ipv4pools: '["pool-1", "pool-2"]'
-```
-
-Note the single quotes around the list, which preserve the inner double quotes around the pool names.
-
-If both the pod and the pod’s namespace have the annotation, the pod annotation takes precedence.
-
-The annotation must be present at the time the pod is created. Adding it to an existing pod has no effect.
-
-## Additional resources
-
-For help configuring {{prodname}} IPAM, see [Configuring the {{prodname}} CNI Plugins](../../reference/configure-cni-plugins.mdx).
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx b/calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx
deleted file mode 100644
index 1b55341884..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/migrate-pools.mdx
+++ /dev/null
@@ -1,227 +0,0 @@
----
-description: Migrate pods from one IP pool to another on a running cluster without network disruption.
----
-
-# Migrate from one IP pool to another
-
-import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx';
-
-## Big picture
-
-Migrate pods from one IP pool to another on a running cluster without network disruption.
-
-## Value
-
-Pods are assigned IP addresses from IP pools that you configure in {{prodname}}. As the number of pods increases, you may need to increase the number of addresses available for pods to use. Or, you may need to move pods from a CIDR that was used by mistake. {{prodname}} lets you migrate from one IP pool to another on a running cluster without network disruption.
-
-## Concepts
-
-### IP pools and cluster CIDRs
-
-{{prodname}} supports using multiple disjoint IP pool CIDRs within the cluster. However, Kubernetes expects that all pods have addresses within the same cluster CIDR. This means that although it is technically feasible to create an IP pool outside of the cluster CIDR, we do not recommend it. Pods allocated addresses outside of the Kubernetes cluster CIDR will lose network connectivity.
-
-## Before you begin...
-
-**Verify that you are using {{prodname}} IPAM**.
-
-<DetermineIpam />
-
-**Verify orchestrator support for changing the pod network CIDR**.
-
-Although Kubernetes supports changing the pod network CIDR, not all orchestrators do. Check your orchestrator documentation to verify.
-
-## How to
-
-### Migrate from one IP pool to another
-
-Follow these steps to migrate pods from one IP pool to another pool.
-
-:::note
-
-If you follow these steps, existing pod connectivity will not be affected. (If you delete the old IP pool before you create and verify the new pool, existing pods will be affected.) When pods are deleted, applications may be temporarily unavailable (depending on the type of application); plan accordingly.
-
-:::
-
-1. Add a new IP pool.
-
-   :::note
-
-   It is highly recommended that your Calico IP pools are within the Kubernetes cluster CIDR. If pod IPs are allocated
-   from outside of the Kubernetes cluster CIDR, some traffic flows may have NAT applied unnecessarily, causing unexpected behavior.
-
-   :::
-
-1. Disable the old IP pool.
-
-   :::note
-
-   Disabling an IP pool only prevents new IP address allocations; it does not affect the networking of existing pods.
-
-   :::
-
-1. Delete pods from the old IP pool. This includes any new pods that may have been created with the old IP pool prior to disabling the pool.
-
-1. Verify that new pods get an address from the new IP pool.
-
-1. Delete the old IP pool.
-
-## Tutorial
-
-In the following example, we created a Kubernetes cluster using **kubeadm**. But the IP pool CIDR we configured (192.168.0.0/16) doesn't match the
-Kubernetes cluster CIDR. Let's change the CIDR to **10.0.0.0/16**, which for the purposes of this example falls within the cluster CIDR.
-
-Let’s run `calicoctl get ippool -o wide` to see the IP pool, **default-ipv4-ippool**.
-
-```
-NAME                  CIDR             NAT    IPIPMODE   VXLANMODE   DISABLED
-default-ipv4-ippool   192.168.0.0/16   true   Always     Never       false
-```
-
-When we run `calicoctl get wep --all-namespaces`, we see that a pod is created using the default range (192.168.52.130/32).
-
-```
-NAMESPACE     WORKLOAD                  NODE      NETWORKS            INTERFACE
-kube-system   coredns-6f4fd4bdf-8q7zp   vagrant   192.168.52.130/32   cali800a63073ed
-```
-
-Let’s get started changing this pod to the new IP pool (10.0.0.0/16).
-
-### Step 1: Add a new IP pool
-
-We add a new **IPPool** with the CIDR range, **10.0.0.0/16**.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: new-pool
-spec:
-  cidr: 10.0.0.0/16
-  ipipMode: Always
-  natOutgoing: true
-```
-
-Let’s verify the new IP pool.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME                  CIDR             NAT    IPIPMODE   DISABLED
-default-ipv4-ippool   192.168.0.0/16   true   Always     false
-new-pool              10.0.0.0/16      true   Always     false
-```
-
-### Step 2: Disable the old IP pool
-
-List the existing IP pool definition.
-
-```bash
-calicoctl get ippool -o yaml > pools.yaml
-```
-
-```yaml
-apiVersion: projectcalico.org/v3
-items:
-  - apiVersion: projectcalico.org/v3
-    kind: IPPool
-    metadata:
-      name: default-ipv4-ippool
-    spec:
-      cidr: 192.168.0.0/16
-      ipipMode: Always
-      natOutgoing: true
-  - apiVersion: projectcalico.org/v3
-    kind: IPPool
-    metadata:
-      name: new-pool
-    spec:
-      cidr: 10.0.0.0/16
-      ipipMode: Always
-      natOutgoing: true
-```
-
-Edit `pools.yaml`, and disable the old IP pool by setting `disabled: true`:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: default-ipv4-ippool
-spec:
-  cidr: 192.168.0.0/16
-  ipipMode: Always
-  natOutgoing: true
-  disabled: true
-```
-
-Apply the changes.
-
-Remember, disabling a pool only affects new IP allocations; networking for existing pods is not affected.
-
-```bash
-calicoctl apply -f pools.yaml
-```
-
-Verify the changes.
-
-```bash
-calicoctl get ippool -o wide
-```
-
-```
-NAME                  CIDR             NAT    IPIPMODE   DISABLED
-default-ipv4-ippool   192.168.0.0/16   true   Always     true
-new-pool              10.0.0.0/16      true   Always     false
-```
-
-### Step 3: Delete pods from the old IP pool
-
-Next, we delete all of the existing pods from the old IP pool. (In our example, **coredns** is our only pod; for multiple pods you would trigger a deletion for all pods in the cluster.)
-
-```bash
-kubectl delete pod -n kube-system coredns-6f4fd4bdf-8q7zp
-```
-
-### Step 4: Verify that new pods get an address from the new IP pool
-
-1. Create a test namespace.
-
-   ```bash
-   kubectl create ns ippool-test
-   ```
-
-1. Create an nginx pod.
-
-   ```bash
-   kubectl -n ippool-test create deployment nginx --image nginx
-   ```
-
-1. Verify that the new pod gets an IP address from the new range.
-
-   ```bash
-   kubectl -n ippool-test get pods -l app=nginx -o wide
-   ```
-
-1. Clean up the ippool-test namespace.
-
-   ```bash
-   kubectl delete ns ippool-test
-   ```
-
-### Step 5: Delete the old IP pool
-
-Now that you've verified that pods are getting IPs from the new range, you can safely delete the old pool.
-
-```bash
-calicoctl delete pool default-ipv4-ippool
-```
-
-## Additional resources
-
-- [IP pools reference](../../reference/resources/ippool.mdx)
diff --git a/calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx b/calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx
deleted file mode 100644
index a83d62b044..0000000000
--- a/calico_versioned_docs/version-3.25/networking/ipam/use-specific-ip.mdx
+++ /dev/null
@@ -1,65 +0,0 @@
----
-description: Specify the IP address for a pod instead of allowing Calico to automatically choose one.
----
-
-# Use a specific IP address with a pod
-
-import DetermineIpam from '@site/calico_versioned_docs/version-3.25/_includes/content/_determine-ipam.mdx';
-
-## Big picture
-
-Choose the IP address for a pod instead of allowing Calico to choose automatically.
-
-## Value
-
-Some applications require the use of stable IP addresses. Also, you may want to create entries in external DNS servers that point directly to pods, and this requires static IPs.
-
-## Concepts
-
-### Kubernetes pod CIDR
-
-The **Kubernetes pod CIDR** is the range of IPs Kubernetes expects pod IPs to be assigned from. It is defined for the entire cluster and is used by various Kubernetes components to determine whether an IP belongs to a pod. For example, kube-proxy treats traffic differently if an IP is from a pod than if it is not. All pod IPs must be in the CIDR range for Kubernetes to function correctly.
- -**IP Pools** - -IP pools are ranges of IP addresses from which Calico assigns pod IPs. Static IPs must be in an IP pool. - -## Before you begin... - -Your cluster must be using Calico IPAM to use this feature. - - - -## How to - -Annotate the pod with cni.projectcalico.org/ipAddrs set to a list of IP addresses to assign, enclosed in brackets. For example: - -``` - "cni.projectcalico.org/ipAddrs": "[\"192.168.0.1\"]" -``` - -Note the use of the escaped `\"` for the inner double quotes around the addresses. - -The address must be within a configured Calico IP pool and not currently in use. The annotation must be present when the pod is created; adding it later has no effect. - -Note that currently only a single IP address is supported per-pod using this annotation. - -### Reserving IPs for manual assignments - -The `cni.projectcalico.org/ipAddrs` annotation requires the IP address to be within an IP pool. This means that, -by default, {{prodname}} may decide to use the IP address that you select for another workload or for an internal -tunnel address. To prevent this, there are several options: - -- To reserve a whole IPPool for manual allocations, you can set its [node selector](../../reference/resources/ippool.mdx) to `"!all()"`. Since the `!all()` - cannot match any nodes, the IPPool will not be used for any automatic assignments. - -- To reserve part of a pool, you can create an [`IPReservation` resource](../../reference/resources/ipreservation.mdx). This allows for certain IPs to be reserved so - that Calico IPAM will not use them automatically. However, manual assignments (using the annotation) can still use - IPs that are "reserved". - -- To prevent {{prodname}} from using IPs from a certain pool for internal IPIP and/or VXLAN tunnel addresses, you - can set the `allowedUses` field on the [IPPool](../../reference/resources/ippool.mdx) to `["Workload"]`. - -## Additional resources - -For help configuring Calico CNI and Calico IPAM, see [Configuring the Calico CNI Plugins](../../reference/configure-cni-plugins.mdx). diff --git a/calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx b/calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx deleted file mode 100644 index 2ede08d76f..0000000000 --- a/calico_versioned_docs/version-3.25/networking/openstack/configuration.mdx +++ /dev/null @@ -1,87 +0,0 @@ ---- -description: Configure OpenStack components for Calico. ---- - -# Configure systems for use with Calico - -When running {{prodname}} with OpenStack, you also need to configure various -OpenStack components, as follows. - -## Nova (/etc/nova/nova.conf) - -{{prodname}} uses the Nova metadata service to provide metadata to VMs, -without any proxying by Neutron. To make that work: - -- An instance of the Nova metadata API must run on every compute node. -- `/etc/nova/nova.conf` must not set `service_neutron_metadata_proxy` - or `service_metadata_proxy` to `True`. (The default `False` value is - correct for a {{prodname}} cluster.) - -## Neutron server (/etc/neutron/neutron.conf) - -In `/etc/neutron/neutron.conf` you need the following settings to -configure the Neutron service. - -| Setting | Value | Meaning | -| -------------------- | --------- | ---------------------------------------- | -| core_plugin | calico | Use the {{prodname}} core plugin | -| -------------------- | --------- | ---------------------------------------- | - -{{prodname}} can operate either as a core plugin or as an ML2 mechanism driver. 
The
-function is the same both ways, except that floating IPs are only supported
-when operating as a core plugin; hence the recommended setting here.
-
-However, if you don't need floating IPs and have other reasons for using ML2,
-you can, instead, set
-
-| Setting     | Value                                | Meaning        |
-| ----------- | ------------------------------------ | -------------- |
-| core_plugin | neutron.plugins.ml2.plugin.ML2Plugin | Use ML2 plugin |
-
-and then the further ML2-specific configuration as covered below.
-
-The following options in the `[calico]` section of `/etc/neutron/neutron.conf` govern how
-the {{prodname}} plugin/driver and DHCP agent connect to the {{prodname}} etcd
-datastore. You should set `etcd_host` to the IP of your etcd server, and `etcd_port` if
-that server is using a non-standard port. If the etcd server is TLS-secured, also set:
-
-- `etcd_cert_file` to a client certificate, which must be signed by a Certificate
-  Authority that the server trusts
-
-- `etcd_key_file` to the corresponding private key file
-
-- `etcd_ca_cert_file` to a file containing data for the Certificate Authorities that you
-  trust to sign the etcd server's certificate.
-
-| Setting           | Default Value | Meaning                                                       |
-| ----------------- | ------------- | ------------------------------------------------------------- |
-| etcd_host         | 127.0.0.1     | The hostname or IP of the etcd server                         |
-| etcd_port         | 2379          | The port to use for the etcd node/proxy                       |
-| etcd_key_file     |               | The path to the TLS key file to use with etcd                 |
-| etcd_cert_file    |               | The path to the TLS client certificate file to use with etcd  |
-| etcd_ca_cert_file |               | The path to the TLS CA certificate file to use with etcd      |
-
-In a [multi-region deployment](multiple-regions.mdx),
-`[calico] openstack_region` configures the name of the region that the local compute or controller
-node belongs to.
-
-| Setting            | Default Value | Meaning                                                                       |
-| ------------------ | ------------- | ----------------------------------------------------------------------------- |
-| `openstack_region` | none          | The name of the region that the local compute or controller node belongs to.  |
-
-When specified, the value of `openstack_region` must be a string of lower case alphanumeric
-characters or '-', starting and ending with an alphanumeric character, and must match the value of
-[`OpenStackRegion`](../../reference/felix/configuration.mdx#openstack-specific-configuration)
-configured for the Felixes in the same region.
-
-## ML2 (.../ml2_conf.ini)
-
-In `/etc/neutron/plugins/ml2/ml2_conf.ini` you need the following
-settings to configure the ML2 plugin.
-
-| Setting              | Value       | Meaning                           |
-| -------------------- | ----------- | --------------------------------- |
-| mechanism_drivers    | calico      | Use {{prodname}}                  |
-| type_drivers         | local, flat | Allow 'local' and 'flat' networks |
-| tenant_network_types | local, flat | Allow 'local' and 'flat' networks |
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx b/calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx
deleted file mode 100644
index 6eae1bf1c9..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/connectivity.mdx
+++ /dev/null
@@ -1,255 +0,0 @@
----
-description: Configure OpenStack networking for Calico.
----
-
-# IP addressing and connectivity
-
-An OpenStack deployment is of limited use if its VMs cannot reach and be
-reached by the outside world.
This document will explain how to -configure your {{prodname}}-based OpenStack deployment to ensure that you have -the desired connectivity with the outside world. - -## Major differences from standard OpenStack - -If you've deployed OpenStack before you'll be thinking in terms of -routers, floating IPs, and external networks. {{prodname}}'s focus on -simplicity means that it doesn't use any of these concepts. This section -is mostly a warning: even if you think you know what you're doing, -please read the rest of this article. You might be surprised! - -## Setting up connectivity - -### Part 0: Deciding your address ranges - -For {{prodname}}, it's best to pick up to three address ranges you're going to -use from the following three options. If it's possible, use all three. - -The first option is an IPv6 address range, assuming you want your VMs to -have IPv6 connectivity. Note that you can only use this range if your -data center network can route IPv6 traffic. All IPv6 addresses should be -considered 'externally reachable', so this needs to be a range that will -be routed to your gateway router: ideally globally scoped. - -The second option is a 'private' IPv4 range, assuming you want your VMs -to have IPv4 connectivity. This is the most likely range for you to -configure. This range will contain all VMs that cannot be reached by -traffic that originates from outside the data center. - -The third option is a 'public' IPv4 range, assuming you want your VMs to -have IPv4 connectivity. This range will contain all the VMs that want to -be reachable by traffic that originates from outside the data center. -Make sure that traffic destined for this range from outside the data -center will be routed to your gateway, or nothing will work! - -The minimum requirement is one of those address ranges. - -### Part 1: Configuring the fabric - -Your {{prodname}} deployment will require a gateway router. In most -non-trivial cases this will be a heavy-duty router, but if you're -deploying a smaller network (maybe for testing purposes) and don't have -access to one you can use a Linux server in the role. - -The gateway router needs to be on the default route for all of your -compute hosts. This is to ensure that all traffic destined to leave the -data center goes via the gateway. That means that in a flat L3 topology -the gateway router needs to be set as the next hop. In a more complex -setup such as a multi-tier L3 topology the next hop may need to be -slightly shorter, for example to a top-of-rack router, which will in -turn need to route towards the gateway router. - -Then, the gateway router needs to be a BGP peer of the {{prodname}} network. -This could be a peer of one or more route reflectors, or in smaller -topologies directly peering with the compute hosts. This is to ensure it -knows the routes to all the VMs, so that it knows which way to route -traffic destined for them. Instructions for configuring your gateway -(and potentially BGP route reflectors) are beyond the scope of this -document. If you don't know how to do this or want to know how {{prodname}} -fits into your existing deployment, please get in touch on our mailing -list: it is difficult to add a generic solution to this problem to this -article. - -If your gateway uses eBGP to advertise routes externally, you'll need to -configure the BGP policy on the gateway to ensure that it does not -export routes to the private IPv4 address range you configured above. 
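-
-As an illustrative sketch only (gateway platforms differ, and the `10.65.0.0/16` private range and peer details here are assumptions for illustration), a BIRD-style export filter that withholds the private range from an eBGP peer could look like the following:
-
-```
-# Reject the private IPv4 pod range; advertise everything else.
-filter no_private_export {
-  if net ~ [ 10.65.0.0/16+ ] then reject;
-  accept;
-}
-
-protocol bgp upstream {
-  # Neighbor and AS configuration omitted.
-  export filter no_private_export;
-}
-```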
-Otherwise, in smaller deployments, you just need to make sure that -external traffic destined for your VMs will get routed to the gateway. -How you do this is outside the scope of this document: please ask for -assistance on our mailing list. - -Finally, configure your gateway to do stateful PNAT for any traffic -coming from the IPv4 internal range. This ensures that even VMs that -cannot be directly reached from the external network can still contact -servers themselves, to do things like request software updates. -Again, the actual manner in which this is configured depends on your -router. - -### Part 2: Set up OpenStack - -In OpenStack, you want to set up two shared Neutron networks. For the -first, add one IPv4 subnet containing the 'external' IPv4 range. Make -sure the subnet has a gateway IP, and that DHCP is enabled. -Additionally, add one IPv6 subnet containing half your IPv6 range, again -with a gateway IP and DHCP enabled. Make sure this network has a name -that makes it clear that it's for your 'externally accessible' VMs. -Maybe even mark it an 'external' network, though that has no effect on -what {{prodname}} does. - -For the second network, add one IPv4 subnet containing the 'private' -IPv4 range and one IPv6 subnet containing the other half of your IPv6 -range, both with gateway IPs and DHCP enabled. Make sure this network -has a name that makes it clear that it's for your 'private' VMs. Note -that if you give this network part of your IPv6 range these VMs will all -be reachable over IPv6. It is expected that all users will want to -deploy in this way, but if you don't, either don't give these VMs IPv6 -addresses or give them private ones that are not advertised by your -gateway. - -Then, configure the default network, subnet, router and floating IP -quota for all tenants to be 0 to prevent them from creating more -networks and confusing themselves! - -A sample configuration is below, showing the networks and two of the -four subnets (as they differ only in their address ranges, all other -configuration is the same). - -From the controller, issue the following Neutron CLI command. - -```bash -neutron net-list -``` - -It returns a list of the networks. - -``` -+--------------------------------------+----------+----------------------------------------------------------+ -| id | name | subnets | -+--------------------------------------+----------+----------------------------------------------------------+ -| 8d5dec25-a6aa-4e18-8706-a51637a428c2 | external | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead 172.18.208.0/24 | -| | | cf6ceea0-dde0-4018-ab9a-f8f68935622b 2001:db8:a41:2::/64 | -| fa52b704-7b3c-4c83-8698-244807352711 | internal | 301b3e63-5324-4d62-8e22-ed8dddd50689 10.65.0.0/16 | -| | | bf94ccb1-c57c-4c9a-a873-c20cbfa4ecaf 2001:db8:a41:3::/64 | -+--------------------------------------+----------+----------------------------------------------------------+ -``` - -Next, check the details of the `external` network. - -```bash -neutron net-show external -``` - -It should return something like the following. 
- -``` -+---------------------------+--------------------------------------+ -| Field | Value | -+---------------------------+--------------------------------------+ -| admin_state_up | True | -| id | 8d5dec25-a6aa-4e18-8706-a51637a428c2 | -| name | external | -| provider:network_type | local | -| provider:physical_network | | -| provider:segmentation_id | | -| router:external | True | -| shared | True | -| status | ACTIVE | -| subnets | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead | -| | cf6ceea0-dde0-4018-ab9a-f8f68935622b | -| tenant_id | ed34337f935745bb911eeb741bc4374b | -+---------------------------+--------------------------------------+ -``` - -Check the details of the `internal` network. - -```bash -neutron net-show internal -``` - -It should return something like the following. - -``` -+---------------------------+--------------------------------------+ -| Field | Value | -+---------------------------+--------------------------------------+ -| admin_state_up | True | -| id | fa52b704-7b3c-4c83-8698-244807352711 | -| name | internal | -| provider:network_type | local | -| provider:physical_network | | -| provider:segmentation_id | | -| router:external | False | -| shared | True | -| status | ACTIVE | -| subnets | 301b3e63-5324-4d62-8e22-ed8dddd50689 | -| | bf94ccb1-c57c-4c9a-a873-c20cbfa4ecaf | -| tenant_id | ed34337f935745bb911eeb741bc4374b | -+---------------------------+--------------------------------------+ -``` - -Check the `external4` subnet. - -```bash -neutron subnet-show external4 -``` - -It should return something like the following. - -``` -+------------------+----------------------------------------------------+ -| Field | Value | -+------------------+----------------------------------------------------+ -| allocation_pools | {"start": "172.18.208.2", "end": "172.18.208.255"} | -| cidr | 172.18.208.0/24 | -| dns_nameservers | | -| enable_dhcp | True | -| gateway_ip | 172.18.208.1 | -| host_routes | | -| id | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead | -| ip_version | 4 | -| name | external4 | -| network_id | 8d5dec25-a6aa-4e18-8706-a51637a428c2 | -| tenant_id | ed34337f935745bb911eeb741bc4374b | -+------------------+----------------------------------------------------+ -``` - -Check the `external6` subnet. - -```bash -neutron subnet-show external6 -``` - -It should return something like the following. - -``` -+------------------+-----------------------------------------------------------------------------+ -| Field | Value | -+------------------+-----------------------------------------------------------------------------+ -| allocation_pools | {"start": "2001:db8:a41:2::2", "end": "2001:db8:a41:2:ffff:ffff:ffff:fffe"} | -| cidr | 2001:db8:a41:2::/64 | -| dns_nameservers | | -| enable_dhcp | True | -| gateway_ip | 2001:db8:a41:2::1 | -| host_routes | | -| id | cf6ceea0-dde0-4018-ab9a-f8f68935622b | -| ip_version | 6 | -| name | external6 | -| network_id | 8d5dec25-a6aa-4e18-8706-a51637a428c2 | -| tenant_id | ed34337f935745bb911eeb741bc4374b | -+------------------+-----------------------------------------------------------------------------+ -``` - -## Part 3: Start using your networks - -At this stage, all configuration is done! When you spin up a new VM, you -have to decide if you want it to be contactable from outside the data -center. If you do, give it a network interface on the `external` -network: otherwise, give it one on the `internal` network. 
Obviously, a -machine that originally wasn't going to be reachable can be made -reachable by plugging a new interface into it on the `external` network. - -Right now we don't support address mobility, so an address is tied to a -single port until that port is no longer in use. We plan to address this -in the future. - -The next step in configuring your OpenStack deployment is to configure -security. We'll have a document addressing this shortly. diff --git a/calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx b/calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx deleted file mode 100644 index 9b7882ce1e..0000000000 --- a/calico_versioned_docs/version-3.25/networking/openstack/dev-machine-setup.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -description: Configure Calico networking for OpenStack VMs. ---- - -# Set up a development machine - -In this example, a user wants to spin up a machine to use as a Linux -development environment. This user has a straightforward use-case: they -want a GUI and SSH access, but relatively little else. - -This user is provisioned with a single OpenStack user and single -OpenStack tenant. Neutron will automatically provision them with a -single security group, `default`, that contains the following rules: - -- allow all inbound traffic from machines in the `default` security - group -- allow all outbound traffic to anywhere - -Per the instructions in [IP addressing and connectivity](connectivity.mdx), this user cannot create -Neutron networks or subnets, but they do have access to the networks -created by the administrator: `external` and `internal`. - -Because the user wants to be able to reach the machine from their own -laptop, they need the machine to be reachable from outside the data -center. In vanilla Neutron, this would mean provisioning it with a -floating IP, but in {{prodname}} they instead want to make sure the VM is -attached to the `external` network. To add themselves to this network, -the user needs to find out the UUID for it. - -```bash -neutron net-list -``` - -This should return something like the following. - -``` -+--------------------------------------+----------+----------------------------------------------------------+ -| id | name | subnets | -+--------------------------------------+----------+----------------------------------------------------------+ -| 8d5dec25-a6aa-4e18-8706-a51637a428c2 | external | 54db559c-5e1d-4bdc-83b0-c479ef2a0ead 172.18.208.0/24 | -| | | cf6ceea0-dde0-4018-ab9a-f8f68935622b 2001:db8:a41:2::/64 | -| fa52b704-7b3c-4c83-8698-244807352711 | internal | 301b3e63-5324-4d62-8e22-ed8dddd50689 10.65.0.0/16 | -| | | bf94ccb1-c57c-4c9a-a873-c20cbfa4ecaf 2001:db8:a41:3::/64 | -+--------------------------------------+----------+----------------------------------------------------------+ -``` - -In the example above, the `external` network has the UUID -`8d5dec25-a6aa-4e18-8706-a51637a428c2`. Thus, the machine can be created -with the following `nova boot` command. - -```bash -nova boot --flavor m1.medium \ - --image debian-wheezy-amd64 \ - --security-groups default \ - --nic "netid=8d5dec25-a6aa-4e18-8706-a51637a428c2" \ - development-server -``` - -This places the VM with a single NIC in the `external` network. 
The VM -starts to boot, and Neutron allocates it an IP address in the `external` -network: in this case, both an IPv4 and IPv6 address, as you can see -below: - -``` -+--------------------------------------+-----------------------------------------------------------+ -| Property | Value | -+--------------------------------------+-----------------------------------------------------------+ -| external network | 2001:db8:a41:2::1c, 172.18.208.85 | -| flavor | m1.medium (3) | -| hostId | b80247c27400fc9048ca569c8635f00801654bf676a00d8f08887215 | -| id | e36f4e62-0efa-4188-87b8-8c96dc6e6028 | -| name | development-server | -| security_groups | default | -+--------------------------------------+-----------------------------------------------------------+ -``` - -While the machine boots, the security group can be configured. It needs -four extra rules: one for SSH and three for VNC. In this example, -developer's personal machine has the IPv4 address 191.64.52.12, and -that's the only machine they'd like to be able to access their machine. -For that reason, they add the four security group rules as follows. - -To add the SSH ingress rule: - -```bash -neutron security-group-rule-create --protocol tcp \ - --port-range-min 22 \ - --port-range-max 22 \ - --direction ingress \ - --remote-ip-prefix 191.64.52.12/32 \ - --ethertype IPv4 \ - default -``` - -To add the first VNC rule: - -```bash -neutron security-group-rule-create --protocol tcp \ - --port-range-min 5800 \ - --port-range-max 5801 \ - --direction ingress \ - --remote-ip-prefix 191.64.52.12/32 \ - --ethertype IPv4 \ - default -``` - -To add the second VNC rule: - -```bash -neutron security-group-rule-create --protocol tcp \ - --port-range-min 5900 \ - --port-range-max 5901 \ - --direction ingress \ - --remote-ip-prefix 191.64.52.12/32 \ - --ethertype IPv4 \ - default -``` - -To add the third VNC rule: - -```bash -neutron security-group-rule-create --protocol tcp \ - --port-range-min 6000 \ - --port-range-max 6001 \ - --direction ingress \ - --remote-ip-prefix 191.64.52.12/32 \ - --ethertype IPv4 \ - default -``` - -At this stage, the developer's machine is up and running. It can be -reached on its public IP (172.18.208.85), and the developer confirms -this by SSHing into their box. They're now ready to go. diff --git a/calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx b/calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx deleted file mode 100644 index 468bda0bfc..0000000000 --- a/calico_versioned_docs/version-3.25/networking/openstack/floating-ips.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -description: Configure floating IPs in Calico for OpenStack. ---- - -# Floating IPs - -networking-calico includes beta support for floating IPs. Currently this -requires running {{prodname}} as a Neutron core plugin (i.e. `core_plugin = calico`) instead of as an ML2 mechanism driver. - -:::note - -We would like it to work as an ML2 mechanism driver too—patches -and/or advice welcome! - -::: - -To set up a floating IP, you need the same pattern of Neutron data model -objects as you do for Neutron in general, which means: - -- a tenant network, with an instance attached to it, that will be the target of - the floating IP - -- a Neutron router, with the tenant network connected to it - -- a provider network with `router:external True` that is set as the - router's gateway (e.g. 
with `neutron router-gateway-set`), and with a - subnet with a CIDR that floating IPs will be allocated from - -- a floating IP, allocated from the provider network subnet, that maps onto the - instance attached to the tenant network. - -For example: - -1. Create tenant network and subnet. - - ```bash - neutron net-create --shared calico - neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0.0/24 - ``` - -1. Boot a VM on that network. - - ```bash - nova boot [...] - ``` - -1. Find its Neutron port ID. - - ```bash - neutron port-list - ``` - -1. Create an external network and subnet; this is where floating - IPs will be allocated from. - - ```bash - neutron net-create public --router:external True - neutron subnet-create public 172.16.1.0/24 - ``` - -1. Create a router connecting the tenant and external networks. - - ```bash - neutron router-create router1 - neutron router-interface-add router1 - neutron router-gateway-set router1 public - ``` - -1. Create a floating IP and associate it with the target VM. - - ```bash - neutron floatingip-create public - neutron floatingip-associate - ``` - - Then the {{prodname}} agents will arrange that the floating IP is routed to the - instance's compute host, and then DNAT'd to the instance's fixed IP address. - -1. From a compute node, issue the following command. - - ```bash - ip r - ``` - - It should return the routing table. - - ``` - default via 10.240.0.1 dev eth0 - 10.65.0.13 dev tap9a7e0868-da scope link - 10.65.0.14 via 192.168.8.4 dev l2tpeth8-3 proto bird - 10.65.0.23 via 192.168.8.4 dev l2tpeth8-3 proto bird - 10.240.0.1 dev eth0 scope link - 172.16.1.3 dev tap9a7e0868-da scope link - 192.168.8.0/24 dev l2tpeth8-3 proto kernel scope link src 192.168.8.3 - 192.168.122.0/24 dev virbr0 proto kernel scope link src 192.168.122.1 - ``` - -1. Issue the following command to review iptables. - - ```bash - sudo iptables -L -n -v -t nat - ``` - - It should return something like the following. - - ``` - [...] - Chain felix-FIP-DNAT (2 references) - pkts bytes target prot opt in out source destination - 0 0 DNAT all -- * * 0.0.0.0/0 172.16.1.3 to:10.65.0.13 - - Chain felix-FIP-SNAT (1 references) - pkts bytes target prot opt in out source destination - 0 0 SNAT all -- * * 10.65.0.13 10.65.0.13 to:172.16.1.3 - - Chain felix-OUTPUT (1 references) - pkts bytes target prot opt in out source destination - 1 60 felix-FIP-DNAT all -- * * 0.0.0.0/0 0.0.0.0/0 - - Chain felix-POSTROUTING (1 references) - pkts bytes target prot opt in out source destination - 1 60 felix-FIP-SNAT all -- * * 0.0.0.0/0 0.0.0.0/0 - - Chain felix-PREROUTING (1 references) - pkts bytes target prot opt in out source destination - 0 0 felix-FIP-DNAT all -- * * 0.0.0.0/0 0.0.0.0/0 - 0 0 DNAT tcp -- * * 0.0.0.0/0 169.254.169.254 tcp dpt:80 to:127.0.0.1:8775 - [...] - ``` diff --git a/calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx b/calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx deleted file mode 100644 index 233b2a0009..0000000000 --- a/calico_versioned_docs/version-3.25/networking/openstack/host-routes.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -description: Options for host routing with Calico. ---- - -# Host routes - -Neutron allows "host routes" to be configured on a subnet, with each host route -comprising - -- an IP destination prefix -- a next hop IP for routing to that prefix. 
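-
-For example (a sketch using the legacy neutron client seen elsewhere in this guide; the subnet name `demo-subnet` and the addresses are hypothetical), a host route can be attached to a subnet like this:
-
-```bash
-# Route 11.11.0.0/16 via next hop 11.8.0.1 for ports on this subnet.
-neutron subnet-update demo-subnet \
-  --host-routes type=dict list=true destination=11.11.0.0/16,nexthop=11.8.0.1
-```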
- -When an instance is launched and gets an IP from that subnet, Neutron arranges, -via DHCP, that the instance's routing table gets those routes. - -## With {{prodname}}, a host route's next hop IP should be the local host - -networking-calico supports host routes, but it's important to note that a host -route is only consistent with {{prodname}} when its next hop IP represents the local -hypervisor. This is because the local hypervisor, in a {{prodname}} setup, _always_ -routes all data from an instance and so is always the next hop IP for data to -any destination. If the instance's routing table has a route with some other -next hop IP, that next hop IP address will effectively be ignored, and the data -will likely _not_ pass through the implied router; instead the data will go -first to the hypervisor, and then the hypervisor's routing table will determine -its next IP hop from there. - -Specifically, each host route's next hop IP should be the gateway IP of the -subnet that the desired instance NIC is attached to, and from which it got its -IP address - where 'desired instance NIC' means the one that you want data for -that host route to go through. In networking-calico's usage, subnet gateway -IPs represent the local hypervisor, because data sent by an instance is always -routed there. - -:::note - -networking-calico avoids unnecessary IP usage by using the subnet -gateway IP to represent the local compute host, on every compute host where -that subnet is being used. Although that might initially sound odd, it works -because no data is ever sent to or from the gateway IP address; the gateway -IP is only used as the next hop address for the first IP hop from an instance -to its compute host, and then the compute host routes the data again, -according to its routing table, to wherever it needs to go. This also means -that the gateway IP address really is functioning as each instance's default -gateway, in the generally understood sense. - -::: - -## When are host routes useful with {{prodname}}? - -Host routes are useful with {{prodname}} when an instance has multiple NICs and you -want to specify which NIC should be used for data to particular prefixes. - -When an instance has multiple NICs, it should have a default route through only -one of those NICs, and use non-default routes to direct appropriate traffic -through the other NICs. Neutron host routes can be used to establish those -non-default routes; alternatively they can also be programmed manually in the -instance. - -For example, suppose an instance has eth0 attached to a subnet with gateway -10.65.0.1, eth1 attached to a subnet with gateway 11.8.0.1, and a default route -via eth0. Then a host route like - -```bash -11.11.0.0/16,11.8.0.1 -``` - -can be configured for the subnet, to say that data to 11.11.0.0/16 should go -out through eth1. The instance's routing table will then be: - -```bash -default via 10.65.0.1 dev eth0 -10.65.0.0/24 dev eth0 -11.8.0.0/24 dev eth1 -11.11.0.0/16 via 11.8.0.1 dev eth1 -``` - -When an instance only has a single network attachment, and so a single NIC, -host routes cannot make any difference to how data is routed, so it is -unhelpful (although also harmless) to configure them. Regardless of what the -instance's routing table says, data must exit over the single NIC, and is -always layer-2-terminated and rerouted by the host according to the host's -routing table. 
It's required for the host's routing table to cover whatever
-destinations instances may want to send to, and host routes don't add anything
-to that.
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/index.mdx b/calico_versioned_docs/version-3.25/networking/openstack/index.mdx
deleted file mode 100644
index 372f82ee43..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Configure Calico networking in an OpenStack deployment.
-hide_table_of_contents: true
----
-
-# Calico networking for OpenStack
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx b/calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx
deleted file mode 100644
index be335c5f1d..0000000000
--- a/calico_versioned_docs/version-3.25/networking/openstack/ipv6.mdx
+++ /dev/null
@@ -1,56 +0,0 @@
----
-description: Prepare a VM guest OS for IPv6.
----
-
-# Prepare a VM guest OS for IPv6
-
-## Big picture
-
-Prepare a VM guest OS for IPv6.
-
-## How to
-
-OpenStack (not {{prodname}}) controls whether a VM gets IPv4, IPv6, or both addresses. Calico simply honors the addresses that OpenStack specifies. The following extra steps are required for **IPv6 only** and **dual stack** deployments, so that the guest OS can learn its IPv6 address (if assigned by OpenStack).
-
-1. Verify that the guest VM image meets these requirements for IPv6 connectivity.
-
-   - When booting up, the VM must issue a DHCPv6 request for each of its interfaces, so that it can learn the IPv6 addresses that OpenStack allocates for it. If the VM uses the widely-deployed **DHCP client from ISC**, it must have a fix/workaround for [this known issue](https://kb.isc.org/docs/aa-01141).
-   - The VM must be configured to accept router advertisements.
-
-   Although not all common cloud images meet these requirements yet, it is easy to remedy by launching an image, making appropriate changes to its configuration files, taking a snapshot, and then using the snapshot thereafter instead of the original image.
-
-   For example, starting from an **Ubuntu cloud image**, the following changes meet the requirements listed.
-
-   - In `/etc/network/interfaces.d/eth0.cfg`, add:
-     ```bash
-     iface eth0 inet6 dhcp
-        accept_ra 1
-     ```
-
-   - In `/sbin/dhclient-script`, add at the start of the script:
-     ```bash
-     new_ip6_prefixlen=128
-     ```
-
-   - In `/etc/sysctl.d`, create a file named `30-eth0-rs-delay.conf` with
-     contents:
-     ```bash
-     net.ipv6.conf.eth0.router_solicitation_delay = 10
-     ```
-
-   For **CentOS**, these additions to a cloud-init script have been reported to be effective:
-
-   runcmd:
-
-   - `sed -i -e '$a'"IPV6INIT=yes" /etc/sysconfig/network-scripts/ifcfg-eth0`
-   - `sed -i -e '$a'"DHCPV6C=yes" /etc/sysconfig/network-scripts/ifcfg-eth0`
-   - `sed -i '/PATH/i\new_ip6_prefixlen=128' /sbin/dhclient-script`
-   - `systemctl restart network`
-
-1. Configure IPv6 support in {{prodname}} by defining an IPv6 subnet in each Neutron network with:
-
-   - The IPv6 address range that you want your VMs to use
-   - DHCP enabled
-   - IPv6 address mode set to DHCPv6 stateful
-
-   We suggest that you initially configure both IPv4 and IPv6 subnets in each network.
This allows handling VM images that support only IPv4 alongside those that support both IPv4 and IPv6, and allows a VM to be accessed over IPv4 in case this is needed to troubleshoot any issues with its IPv6 configuration. In principle, though, we are not aware of any problems with configuring and using IPv6-only networks in OpenStack. diff --git a/calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx b/calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx deleted file mode 100644 index da12609925..0000000000 --- a/calico_versioned_docs/version-3.25/networking/openstack/kuryr.mdx +++ /dev/null @@ -1,40 +0,0 @@ ---- -description: Use Kuryr with Calico networking. ---- - -# Kuryr - -networking-calico works with Kuryr; this means using Neutron, with the {{prodname}} -ML2 driver, to provide networking for container workloads. - -You can use DevStack to install a single node {{prodname}}/Kuryr system, with a -`local.conf` file like this: - -```bash -[[local|localrc]] -ADMIN_PASSWORD=015133ea2bdc46ed434c -DATABASE_PASSWORD=d0060b07d3f3631ece78 -RABBIT_PASSWORD=6366743536a8216bde26 -SERVICE_PASSWORD=91eb72bcafb4ddf246ab -SERVICE_TOKEN=c5680feca5e2c9c8f820 - -enable_plugin networking-calico git://git.openstack.org/openstack/networking-calico -enable_plugin kuryr git://git.openstack.org/openstack/kuryr -enable_service kuryr -enable_service etcd-server -enable_service docker-engine - -LOGFILE=stack.log -LOG_COLOR=False -``` - -Please follow general Kuryr instructions for creating a Docker network that -uses Kuryr as its backend, and for launching containers on that network. Then -if you look at the IP routing table and iptables, you will see {{prodname}} routes to -the containers. - -## {{prodname}} for containers without Kuryr - -The {{prodname}} project also provides networking for containers more directly, -without Neutron and Kuryr as intermediaries. Please see [Getting Started](../../getting-started/index.mdx) - for details. diff --git a/calico_versioned_docs/version-3.25/networking/openstack/labels.mdx b/calico_versioned_docs/version-3.25/networking/openstack/labels.mdx deleted file mode 100644 index 5ee51ed09b..0000000000 --- a/calico_versioned_docs/version-3.25/networking/openstack/labels.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -description: Use Calico labels to define policy for OpenStack VMs. ---- - -# Endpoint labels and operator policy - -When {{prodname}} represents an OpenStack VM as a {{prodname}} WorkloadEndpoint, -it puts labels on the WorkloadEndpoint to identify the project, security groups and -namespace that the VM belongs to. The deployment operator can use these labels to -configure {{prodname}} policy that is additional to the policy defined by OpenStack -security groups, and that cannot be overridden by user-level security group config. 
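-
-To inspect the labels on a particular VM's endpoint (a sketch; this assumes the default "openstack" namespace, i.e. no `openstack_region` configured), you can query the WorkloadEndpoint resources:
-
-```bash
-# The labels appear under metadata.labels in the output.
-calicoctl get workloadendpoints --namespace openstack -o yaml
-```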
-
-## VM endpoint labels
-
-For the VM's OpenStack project (previously known as 'tenant'), those labels are:
-
-| Label Name                                      | Value                 |
-| ----------------------------------------------- | --------------------- |
-| `projectcalico.org/openstack-project-id`        | `<project ID>`        |
-| `projectcalico.org/openstack-project-name`      | `<project name>`      |
-| `projectcalico.org/openstack-project-parent-id` | `<parent project ID>` |
-
-For each security group that the VM belongs to, those labels are:
-
-| Label Name                                                   | Value                   |
-| ------------------------------------------------------------ | ----------------------- |
-| `sg.projectcalico.org/openstack-<security group ID>`         | `<security group name>` |
-| `sg-name.projectcalico.org/openstack-<security group name>`  | `<security group ID>`   |
-
-For the VM's {{prodname}} namespace, the label is:
-
-| Label Name                    | Value              |
-| ----------------------------- | ------------------ |
-| `projectcalico.org/namespace` | `<namespace name>` |
-
-When `[calico] openstack_region` has been configured in `/etc/neutron/neutron.conf` (as
-recommended for [multiple region deployments](multiple-regions.mdx)) the namespace will be
-"openstack-region-" followed by the configured region name. Otherwise it is simply
-"openstack".
-
-:::note
-
-To allow {{prodname}} to provide the project name and parent ID labels,
-you must give Neutron the 'admin' role within your cluster:
-
-```
-openstack role add --project service --user neutron admin
-```
-
-or some equivalent privilege that allows the Neutron server to do admin-level queries of
-the Keystone database. This is because {{prodname}}'s driver runs as part of the
-Neutron server, and needs to query the Keystone database for the information for those
-labels. If Neutron isn't sufficiently privileged, {{prodname}} will fall back to
-not generating those labels.
-
-:::
-
-:::note
-
-{{prodname}} only allows certain characters in label names and values
-(alphanumerics, '-', '\_', '.' and '/'), so if a project or security group name normally
-has other characters, those will be replaced here by '\_'. Also there is a length
-limit, so particularly long names may be truncated.
-
-:::
-
-:::note
-
-{{prodname}} does not support changing project name or security group
-name for a given ID associated with a VM after the VM has been created. It is
-recommended that operators avoid any possible confusion here by not changing project
-name for a particular project ID or security group name for particular security group
-ID, post-creation.
-
-:::
-
-## Configuring operator policy
-
-Configuring operator policy requires the `calicoctl` executable, so you should
-[install](../../operations/calicoctl/install.mdx) and
-[configure calicoctl](../../operations/calicoctl/configure/overview.mdx), if you
-haven't done so already.
-
-- Calico for OpenStack deployments use an etcd datastore, so you should follow the
-  instructions for an etcd datastore.
-
-- The settings you need for etcd endpoints, and TLS credentials if your deployment uses
-  those, should match what you have in your
-  [`neutron.conf`](configuration.mdx)
-  and [Felix](../../reference/felix/configuration.mdx)
-  configurations.
-
-## Example
-
-Now you can configure {{prodname}} operator policy that will apply before the policy
-that is derived from OpenStack security groups.
For example, to prevent any possible -communication between the "superman" and "lexluthor" projects, you could configure the -following. - -```bash -calicoctl apply -f - < - ``` - - where `` is the name of the region that that compute host belongs to. - -1. In `/etc/neutron/neutron.conf` on each controller and compute node, add - - ```conf - [calico] - openstack_region = - ``` - - where `` is the name of the region that that node belongs to. - -:::note - -the value specified for `OpenStackRegion` and `openstack_region` -must be a string of lower case alphanumeric characters or '-', starting and -ending with an alphanumeric character. - -::: - -:::caution - -If the Felix and Neutron values here do not match, OpenStack -will not be able to launch any VMs in that region, because the Neutron server -for the region will think that there are no working compute nodes. - -::: - -### Configuring OpenStack - -You should now create networks in your OpenStack regions as normal. e.g. - -```bash - neutron net-create --shared calico - neutron subnet-create --gateway 10.65.0.1 --enable-dhcp --ip-version 4 --name calico-v4 calico 10.65.0.0/24 -``` - -:::note - -that Calico networking provides a flat L3 network, -so _subnets across all regions must not overlap_. -For example, having 10.1.0.0/16 in one region and 10.2.0.0/16 in another -would be fine, but 10.1.0.0/16 and 10.1.200.0/24 would not. - -::: - -## Configuring cross-region policy - -Suppose that: - -- you have two regions - -- you have a set of VMs in one region belonging to security group - a7734e61-b545-452d-a3cd-0189cbd9747a - -- you have a set of VMs in another region belonging to security group - 85cc3048-abc3-43cc-89b3-377341426ac5 - -- you want to allow the second set of VMs to connect to port 80 of the first - set. - -You need to have [calicoctl installed and configured for your cluster](labels.mdx#configuring-operator-policy) -. Once that is in place, -you could achieve the desired connectivity by using calicoctl to -configure this {{prodname}} policy: - -```bash -calicoctl apply -f - < mtu 16436 qdisc noqueue - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever - 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 - link/ether fa:16:3e:28:a9:a4 brd ff:ff:ff:ff:ff:ff - inet 10.28.0.13/16 brd 10.28.255.255 scope global eth0 - inet6 fe80::f816:3eff:fe28:a9a4/64 scope link - valid_lft forever preferred_lft forever - ``` - -1. Next, issue the following command. - - ```bash - sudo ip a a 10.28.0.23/16 dev eth0 - ``` - -1. List the interfaces again. - - ```bash - ip a - ``` - - The interfaces should now look more like the following. - - ``` - 1: lo: mtu 16436 qdisc noqueue - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever - 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 - link/ether fa:16:3e:28:a9:a4 brd ff:ff:ff:ff:ff:ff - inet 10.28.0.13/16 brd 10.28.255.255 scope global eth0 - inet 10.28.0.23/16 scope global secondary eth0 - inet6 fe80::f816:3eff:fe28:a9a4/64 scope link - valid_lft forever preferred_lft forever - ``` - -1. Exit the SSH session. - - ``` - Connection to 10.28.0.13 closed. - ``` - -1. And now we can access the VM on its service IP, as shown below. - - ```bash - core@access-node$ ssh cirros@10.28.0.23 - The authenticity of host '10.28.0.23 (10.28.0.23)' can't be established. 
- RSA key fingerprint is 65:a5:b0:0c:e2:c4:ac:94:2a:0c:64:b8:bc:5a:aa:66. - Are you sure you want to continue connecting (yes/no)? yes - - Warning: Permanently added '10.28.0.23' (RSA) to the list of known hosts. - cirros@10.28.0.23's password: - $ - ``` - - Note that we already have security set up that allows SSH to the instance from - our access machine (`192.168.8.1`). - -1. You can check this by listing the security groups. - - ```bash - neutron security-group-list - ``` - - It should return something like the following. - - ``` - +--------------------------------------+---------+----------------------------------------------------------------------+ - | id | name | security_group_rules | - +--------------------------------------+---------+----------------------------------------------------------------------+ - | 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 | default | egress, IPv4 | - | | | egress, IPv6 | - | | | ingress, IPv4, 22/tcp, remote_ip_prefix: 192.168.8.1/32 | - | | | ingress, IPv4, remote_group_id: 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 | - | | | ingress, IPv6, remote_group_id: 75fccd0a-ef3d-44cd-91ec-ef22941f50f5 | - | 903d9936-ce72-4756-a2cc-7c95a846e7e5 | default | egress, IPv4 | - | | | egress, IPv6 | - | | | ingress, IPv4, 22/tcp, remote_ip_prefix: 192.168.8.1/32 | - | | | ingress, IPv4, remote_group_id: 903d9936-ce72-4756-a2cc-7c95a846e7e5 | - | | | ingress, IPv6, remote_group_id: 903d9936-ce72-4756-a2cc-7c95a846e7e5 | - +--------------------------------------+---------+----------------------------------------------------------------------+ - ``` - -## Moving the service IP to another VM - -Service IPs are often used for HA, so need to be moved to target a different VM -if the first one fails for some reason (or if the HA system just decides to -cycle the active VM). - -1. To demonstrate that we create a second test VM. - - ```bash - nova boot --flavor m1.tiny --image cirros-0.3.2-x86_64 --nic net-name=demo-net testvm2 - ``` - -1. List the VMs. - - ```bash - nova list - ``` - - You should see the new VM in the list. - - ``` - +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+ - | ID | Name | Status | Task State | Power State | Networks | +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+ - | b6d8a3c4-9674-4972-9151-11107b60d622 | testvm1 | ACTIVE | - | Running | demo-net=10.28.0.13, 10.28.0.23 | - | bb4ef5e3-dc77-472e-af6f-3f0d8c3e5a6d | testvm2 | ACTIVE | - | Running | demo-net=10.28.0.14, fd5f:5d21:845:1c2e:2::e | - +--------------------------------------+---------+--------+------------+-------------+----------------------------------------------+ - ``` - -1. Check the ports. - - ```bash - neutron port-list - ``` - - It should return something like the following. 
- - ``` - +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+ - | id | name | mac_address | fixed_ips | - +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+ - | 656b3617-570d-473e-a5dd-90b61cb0c49f | | fa:16:3e:4d:d5:25 | | - | 7627a298-a2db-4a1a-bc07-9f0f10f58363 | | fa:16:3e:8e:dc:33 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.14"} | - | | | | {"subnet_id": "345fec2e-6493-44de-a489-97b755c16dd4", "ip_address": "fd5f:5d21:845:1c2e:2::e"} | - | 9a7e0868-da7a-419e-a7ad-9d37e11091b8 | | fa:16:3e:28:a9:a4 | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.13"} | - | | | | {"subnet_id": "0a1221f2-e6ed-413d-a040-62a266bd0d8f", "ip_address": "10.28.0.23"} | - | a4b26bcc-ba94-4033-a9fc-edaf151c0c20 | | fa:16:3e:74:46:bd | | - | a772a5e1-2f13-4fc3-96d5-fa1c29717637 | | fa:16:3e:c9:c6:8f | | - +--------------------------------------+------+-------------------+------------------------------------------------------------------------------------------------+ - ``` - -1. Remove the service IP from the first VM. - - ```bash - neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.13 9a7e0868-da7a-419e-a7ad-9d37e11091b8 - ``` - -1. And add it to the second. - - ```bash - neutron port-update --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.14 \ - --fixed-ip subnet_id=0a1221f2-e6ed-413d-a040-62a266bd0d8f,ip_address=10.28.0.23 7627a298-a2db-4a1a-bc07-9f0f10f58363 - ``` - -1. SSH into `testvm2`. - - ```bash - core@access-node$ ssh cirros@10.28.0.14 - The authenticity of host '10.28.0.14 (10.28.0.14)' can't be established. - RSA key fingerprint is 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85. - Are you sure you want to continue connecting (yes/no)? yes - - Warning: Permanently added '10.28.0.14' (RSA) to the list of known hosts. - cirros@10.28.0.14's password: - ``` - -1. Tell `testvm2` that it now has the service IP `10.28.0.23`. - - ```bash - sudo ip a a 10.28.0.23/16 dev eth0 - ``` - -1. Now connections to `10.28.0.23` go to `testvm2` - - ```bash - core@access-node$ ssh cirros@10.28.0.23 - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! @ - @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ - IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY! - Someone could be eavesdropping on you right now (man-in-the-middle attack)! - It is also possible that a host key has just been changed. - The fingerprint for the RSA key sent by the remote host is - 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85. - Please contact your system administrator. - Add correct host key in /home/core/.ssh/known_hosts to get rid of this message. - Offending RSA key in /home/core/.ssh/known_hosts:4 - RSA host key for 10.28.0.23 has changed and you have requested strict checking. - Host key verification failed. - ``` - -1. Remove the `known_hosts` files. - - ```bash - rm ~/.ssh/known_hosts - ``` - -1. Try again to SSH into the VM. - - ```bash - core@access-node$ ssh cirros@10.28.0.23 - The authenticity of host '10.28.0.23 (10.28.0.23)' can't be established. - RSA key fingerprint is 6a:02:7f:3a:bf:0c:91:de:c4:d6:e7:f6:81:3f:6a:85. - Are you sure you want to continue connecting (yes/no)? 
yes - - Warning: Permanently added '10.28.0.23' (RSA) to the list of known hosts. - cirros@10.28.0.23's password: - ``` - -1. Check the host name. - - ```bash - hostname - ``` - - It should return: - - ``` - testvm2 - ``` - -1. Check the interfaces. - - ``` - ip a - ``` - - They should look something like the following. - - ``` - 1: lo: mtu 16436 qdisc noqueue - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 - inet 127.0.0.1/8 scope host lo - inet6 ::1/128 scope host - valid_lft forever preferred_lft forever - 2: eth0: mtu 1500 qdisc pfifo_fast qlen 1000 - link/ether fa:16:3e:8e:dc:33 brd ff:ff:ff:ff:ff:ff - inet 10.28.0.14/16 brd 10.28.255.255 scope global eth0 - inet 10.28.0.23/16 scope global secondary eth0 - inet6 fe80::f816:3eff:fe8e:dc33/64 scope link - valid_lft forever preferred_lft forever - $ - ``` diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx deleted file mode 100644 index 25be361af4..0000000000 --- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/etcd.mdx +++ /dev/null @@ -1,190 +0,0 @@ ---- -description: Sample configuration files etcd. ---- - -# Configure calicoctl to connect to an etcd datastore - -## Big picture - -Learn how to configure the calicoctl CLI tool for an etcd cluster. - -## Value - -The `calicoctl` CLI tool provides helpful administrative commands for interacting with a {{prodname}} cluster. - -## Concepts - -### calicoctl vs kubectl - -In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs. - -In newer releases, the Calico API server performs that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl. For this reason, we recommend -[installing the Calico API server](../../install-apiserver.mdx) and using `kubectl` instead of `calicoctl` for most operations. - -calicoctl is still required for the following subcommands: - -- [calicoctl node](../../../reference/calicoctl/node/index.mdx) -- [calicoctl ipam](../../../reference/calicoctl/ipam/index.mdx) -- [calicoctl convert](../../../reference/calicoctl/convert.mdx) -- [calicoctl version](../../../reference/calicoctl/version.mdx) - -calicoctl is also required for non-Kubernetes platforms such as OpenStack. - -## How to - -### Complete list of etcd configuration options - -| Configuration file option | Environment variable | Description | Schema | -| ------------------------- | -------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------- | -| `datastoreType` | `DATASTORE_TYPE` | Indicates the datastore to use. If unspecified, defaults to `kubernetes`. (optional) | `kubernetes`, `etcdv3` | -| `etcdEndpoints` | `ETCD_ENDPOINTS` | A comma-separated list of etcd endpoints. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379` (required) | string | -| `etcdDiscoverySrv` | `ETCD_DISCOVERY_SRV` | Domain name to discover etcd endpoints via SRV records. Mutually exclusive with `etcdEndpoints`. 
Example: `example.com` (optional) | string |
-| `etcdUsername` | `ETCD_USERNAME` | User name for RBAC. Example: `user` (optional) | string |
-| `etcdPassword` | `ETCD_PASSWORD` | Password for the given user name. Example: `password` (optional) | string |
-| `etcdKeyFile` | `ETCD_KEY_FILE` | Path to the file containing the private key matching the `calicoctl` client certificate. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/calicoctl/key.pem` (optional) | string |
-| `etcdCertFile` | `ETCD_CERT_FILE` | Path to the file containing the client certificate issued to `calicoctl`. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/calicoctl/cert.pem` (optional) | string |
-| `etcdCACertFile` | `ETCD_CA_CERT_FILE` | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures `calicoctl` to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing `calicoctl` to trust each of the CAs included. Example: `/etc/calicoctl/ca.pem` (optional) | string |
-| `etcdKey` | | The private key matching the `calicoctl` client certificate. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. For an example, see below. (optional) | string |
-| `etcdCert` | | The client certificate issued to `calicoctl`. Enables `calicoctl` to participate in mutual TLS authentication and identify itself to the etcd server. For an example, see below. (optional) | string |
-| `etcdCACert` | | The root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures `calicoctl` to trust the CA that signed the root certificate. The config file may contain multiple root certificates, causing `calicoctl` to trust each of the CAs included. For an example, see below. (optional) | string |
-
-:::note
-
-- If you are running with TLS enabled, ensure your endpoint addresses use HTTPS.
-- When specifying the settings through environment variables, the `DATASTORE_TYPE`
-  environment variable is required for etcdv3.
-- All environment variables may also be prefixed with `CALICO_`. For example,
-  `CALICO_DATASTORE_TYPE` and `CALICO_ETCD_ENDPOINTS` may also be used (see the
-  example below). This is useful if the non-prefixed names clash with environment
-  variables already defined on your system.
-- The configuration file options `etcdCACert`, `etcdCert`, and `etcdKey` do not have
-  corresponding environment variables.
-- Previous versions of `calicoctl` supported the `ETCD_SCHEME` and `ETCD_AUTHORITY`
-  environment variables as a mechanism for specifying the etcd endpoints. These
-  variables are no longer supported. Use `ETCD_ENDPOINTS` instead.
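-
-For example (a hedged illustration, assuming a local etcd endpoint; any of the
-variables above accept the prefix):
-
-```bash
-# The CALICO_-prefixed and non-prefixed forms behave identically.
-CALICO_DATASTORE_TYPE=etcdv3 CALICO_ETCD_ENDPOINTS=http://127.0.0.1:2379 calicoctl get nodes
-```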
-
-:::
-
-### Example configuration file

-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
-  etcdEndpoints: https://etcd1:2379,https://etcd2:2379,https://etcd3:2379
-  etcdKeyFile: /etc/calico/key.pem
-  etcdCertFile: /etc/calico/cert.pem
-  etcdCACertFile: /etc/calico/ca.pem
-```
-
-### Example configuration file with inline CA certificate, client certificate and key
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
-  datastoreType: etcdv3
-  etcdEndpoints: 'https://127.0.0.1:2379'
-  etcdCACert: |
-    -----BEGIN CERTIFICATE-----
-    MIICKzCCAZSgAwIBAgIBAzANBgkqhkiG9w0BAQQFADA3MQswCQYDVQQGEwJVUzER
-    MA8GA1UEChMITmV0c2NhcGUxFTATBgNVBAsTDFN1cHJpeWEncyBDQTAeFw05NzEw
-    MTgwMTM2MjVaFw05OTEwMTgwMTM2MjVaMEgxCzAJBgNVBAYTAlVTMREwDwYDVQQK
-    EwhOZXRzY2FwZTENMAsGA1UECxMEUHViczEXMBUGA==
-    -----END CERTIFICATE-----
-  etcdCert: |
-    -----BEGIN CERTIFICATE-----
-    gI6iLXgMsp2EOlD56I6FA1jrCtNb01XQvX3eyFuA6g5T1jWGYBDtvQb0WRVkdUy9
-    L/uK+sHQwtloCSuakcQAsWV9bajCQtHX8XGu25Yz56kpJ/OJjcishxT6pc/sthum
-    A5PX739JsNUi/p5aG+H/6eNx+ukJP7QaM646YCfS5i8S9DJUvim+/BSlKi2ZiOCd
-    0MYH4Xb7lmAOTNmTvSYpKo9J2fZ9erw0MYSBTyjh6F7PRbHBiivgUnJfGQ==
-    -----END CERTIFICATE-----
-  etcdKey: |
-    -----BEGIN RSA PRIVATE KEY-----
-    k0dWj16h9P6TvfcNl2iwT4VIwx0uy2faWBED1DrCJcuQCy5nPrts2ZIaAWPi1t3t
-    VbDKQvs+KXBEeqh0qYcYkejUXqIF0uKUFLjiQmZssjpL5RHqqWuYKbO87n+Jod1L
-    TjGRHdbP0zF2U0LdjM17rc2hpJ3qrmgJ7pOLzbXMcOr+NP1ojRCArXhQ4iLs7D8T
-    eHw9QH4luJYtnmk7x03izLMQdLWcKnUbqh/xOVPyazgJHXwRxwNXpMsBVGY=
-    -----END RSA PRIVATE KEY-----
-```
-
-### Example using environment variables
-
-```bash
-ETCD_ENDPOINTS=http://myhost1:2379 calicoctl get bgppeers
-```
-
-### Example using etcd DNS discovery
-
-```bash
-ETCD_DISCOVERY_SRV=example.com calicoctl get nodes
-```
-
-### Example using IPv6
-
-Create a single-node etcd cluster listening on IPv6 localhost `[::1]`.
-
-```bash
-etcd --listen-client-urls=http://[::1]:2379 --advertise-client-urls=http://[::1]:2379
-```
-
-Use the etcd IPv6 cluster:
-
-```bash
-ETCD_ENDPOINTS=http://[::1]:2379 calicoctl get bgppeers
-```
-
-### Example using mixed IPv4/IPv6
-
-Create a single-node etcd cluster listening on IPv4 localhost `127.0.0.1` and IPv6 localhost `[::1]`.
-
-```bash
-etcd --listen-client-urls=http://[::1]:2379,http://127.0.0.1:2379 --advertise-client-urls=http://[::1]:2379
-```
-
-Use the IPv6 endpoint:
-
-```bash
-ETCD_ENDPOINTS=http://[::1]:2379 calicoctl get bgppeers
-```
-
-Use the IPv4 endpoint:
-
-```bash
-ETCD_ENDPOINTS=http://127.0.0.1:2379 calicoctl get bgppeers
-```
-
-### {{nodecontainer}}
-
-It is important to note that not only will `calicoctl` use the specified keys directly
-on the host to access etcd, **it will also pass on these environment variables
-and volume mount the keys into the started `{{noderunning}}` container.**
-
-Therefore, configuring `{{nodecontainer}}` for etcd is easily accomplished by running
-`calicoctl node run` with the parameters set correctly.
-
-### Checking the configuration
-
-Here is a simple command to check that the installation and configuration are
-correct.
-
-```bash
-calicoctl get nodes
-```
-
-A correct setup yields a list of the nodes that have registered. If an
-empty list is returned, you are either pointing at the wrong datastore or no
-nodes have registered. If an error is returned, attempt to correct the
-issue and try again.
-
-## Next steps
-
-Now you are ready to read and configure most aspects of {{prodname}}.
You can -find the full list of commands in the -[Command Reference](../../../reference/calicoctl/overview.mdx). - -The full list of resources that can be managed, including a description of each, -can be found in the -[Resource Definitions](../../../reference/resources/overview.mdx). diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx deleted file mode 100644 index 6aabb3c936..0000000000 --- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Configure the calicoctl to access your datastore. -hide_table_of_contents: true ---- - -# Configure calicoctl - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx deleted file mode 100644 index 2237e96501..0000000000 --- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/kdd.mdx +++ /dev/null @@ -1,124 +0,0 @@ ---- -description: Sample configuration files for kdd. ---- - -# Configure calicoctl to connect to the Kubernetes API datastore - -## Big picture - -Learn how to configure the calicoctl CLI tool for your Kubernetes cluster. - -## Value - -The `calicoctl` CLI tool provides helpful administrative commands for interacting with a {{prodname}} cluster. - -## Concepts - -### calicoctl vs kubectl - -In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs. - -In newer releases, the Calico API server performs that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl. For this reason, we recommend -[installing the Calico API server](../../install-apiserver.mdx) and using `kubectl` instead of `calicoctl` for most operations. - -calicoctl is still required for the following subcommands: - -- [calicoctl node](../../../reference/calicoctl/node/index.mdx) -- [calicoctl ipam](../../../reference/calicoctl/ipam/index.mdx) -- [calicoctl convert](../../../reference/calicoctl/convert.mdx) -- [calicoctl version](../../../reference/calicoctl/version.mdx) - -### Default configuration - -By default, calicoctl will attempt to read from the Kubernetes API using the default kubeconfig located at `$(HOME)/.kube/config`. - -If the default kubeconfig does not exist, or you would like to specify alternative API access information, you can do so using the following configuration options. - -## How to - -### Complete list of Kubernetes API connection configuration - -| Configuration file option | Environment variable | Description | Schema | -| ------------------------- | -------------------- | --------------------------------------------------------------------------------------------------------- | ---------------------- | -| `datastoreType` | `DATASTORE_TYPE` | Indicates the datastore to use. [Default: `kubernetes`] | `kubernetes`, `etcdv3` | -| `kubeconfig` | `KUBECONFIG` | When using the Kubernetes datastore, the location of a kubeconfig file to use, e.g. /path/to/kube/config. | string | -| `k8sAPIEndpoint` | `K8S_API_ENDPOINT` | Location of the Kubernetes API. Not required if using kubeconfig. 
[Default: `https://kubernetes-api:443`] | string |
-| `k8sCertFile` | `K8S_CERT_FILE` | Location of a client certificate for accessing the Kubernetes API, e.g., `/path/to/cert`. | string |
-| `k8sKeyFile` | `K8S_KEY_FILE` | Location of a client key for accessing the Kubernetes API, e.g., `/path/to/key`. | string |
-| `k8sCAFile` | `K8S_CA_FILE` | Location of a CA for accessing the Kubernetes API, e.g., `/path/to/ca`. | string |
-| `k8sToken` | | Token to be used for accessing the Kubernetes API. | string |
-
-:::note
-
-All environment variables may also be prefixed with `CALICO_`. For
-example, `CALICO_DATASTORE_TYPE` and `CALICO_KUBECONFIG` may be used.
-This is useful if the non-prefixed names clash with environment
-variables already defined on your system.
-
-:::
-
-### Kubernetes command line
-
-```bash
-DATASTORE_TYPE=kubernetes KUBECONFIG=~/.kube/config calicoctl get nodes
-```
-
-### Example configuration file
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: CalicoAPIConfig
-metadata:
-spec:
-  datastoreType: 'kubernetes'
-  kubeconfig: '/path/to/.kube/config'
-```
-
-### Example using environment variables
-
-```bash
-export DATASTORE_TYPE=kubernetes
-export KUBECONFIG=~/.kube/config
-calicoctl get workloadendpoints
-```
-
-And using `CALICO_` prefixed names:
-
-```bash
-export CALICO_DATASTORE_TYPE=kubernetes
-export CALICO_KUBECONFIG=~/.kube/config
-calicoctl get workloadendpoints
-```
-
-With multiple `kubeconfig` files:
-
-```bash
-export DATASTORE_TYPE=kubernetes
-export KUBECONFIG=~/.kube/main:~/.kube/auxy
-calicoctl get --context main workloadendpoints
-calicoctl get --context auxy workloadendpoints
-```
-
-### Checking the configuration
-
-Here is a simple command to check that the installation and configuration are
-correct.
-
-```bash
-calicoctl get nodes
-```
-
-A correct setup yields a list of the nodes that have registered. If an
-empty list is returned, you are either pointing at the wrong datastore or no
-nodes have registered. If an error is returned, attempt to correct the
-issue and try again.
-
-## Next steps
-
-Now you are ready to read and configure most aspects of {{prodname}}. You can
-find the full list of commands in the
-[Command Reference](../../../reference/calicoctl/overview.mdx).
-
-The full list of resources that can be managed, including a description of each,
-can be found in the
-[Resource Definitions](../../../reference/resources/overview.mdx).
diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx
deleted file mode 100644
index 9fe78ed2e3..0000000000
--- a/calico_versioned_docs/version-3.25/operations/calicoctl/configure/overview.mdx
+++ /dev/null
@@ -1,74 +0,0 @@
----
-description: Configure calicoctl for datastore access.
----
-
-# Configure calicoctl
-
-## Big picture
-
-Learn how to configure the calicoctl CLI tool for your cluster.
-
-## Value
-
-The `calicoctl` CLI tool provides helpful administrative commands for interacting with a {{prodname}} cluster.
-
-## Concepts
-
-### calicoctl vs kubectl
-
-In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs.
-
-In newer releases, the Calico API server performs that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl.
For this reason, we recommend -[installing the Calico API server](../../install-apiserver.mdx) and using `kubectl` instead of `calicoctl` for most operations. - -calicoctl is still required for the following subcommands: - -- [calicoctl node](../../../reference/calicoctl/node/index.mdx) -- [calicoctl ipam](../../../reference/calicoctl/ipam/index.mdx) -- [calicoctl convert](../../../reference/calicoctl/convert.mdx) -- [calicoctl version](../../../reference/calicoctl/version.mdx) - -calicoctl is also required for non-Kubernetes platforms such as OpenStack. - -### Default calicoctl behavior - -Most `calicoctl` commands require access to the {{prodname}} datastore. By default, calicoctl -will attempt to read from the Kubernetes API based on the default kubeconfig. - -## How to - -### Configure access using a Configuration file - -By default, `calicoctl` will look for a configuration file at `/etc/calico/calicoctl.cfg`. You can override this using the `--config` option with commands that require datastore access. -The file can be in either YAML or JSON format. It must be valid and readable by `calicoctl`. For example: - -```yaml noValidation -apiVersion: projectcalico.org/v3 -kind: CalicoAPIConfig -metadata: -spec: - datastoreType: "etcdv3" - etcdEndpoints: "http://etcd1:2379,http://etcd2:2379" - ... -``` - -### Configure access using environment variables - -If `calicoctl` cannot locate, read, or access a configuration file, it will check a specific set of environment variables. - -Refer to the section that corresponds to your datastore type for a full set of options -and examples. - -- [Kubernetes API datastore](kdd.mdx) - -- [etcd datastore](etcd.mdx) - -:::note - -When running `calicoctl` inside a container, any environment variables and -configuration files must be passed to the container so they are available to -the process inside. It can be useful to keep a running container (that sleeps) configured -for your datastore, then it is possible to `exec` into the container and have an -already configured environment. - -::: diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx deleted file mode 100644 index ab048e5496..0000000000 --- a/calico_versioned_docs/version-3.25/operations/calicoctl/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Install and configure the Calico CLI for managing resources. -hide_table_of_contents: true ---- - -# calicoctl - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx b/calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx deleted file mode 100644 index 4bb8954a57..0000000000 --- a/calico_versioned_docs/version-3.25/operations/calicoctl/install.mdx +++ /dev/null @@ -1,410 +0,0 @@ ---- -description: Install the CLI for Calico. ---- - -# Install calicoctl - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import CodeBlock from '@theme/CodeBlock'; - -import { releaseTitle } from '../../variables.js'; - -export function buildUrl() { - const url = - releaseTitle === 'master' - ? 
'https://github.com/projectcalico/calico/releases/latest/download' - : `https://github.com/projectcalico/calico/releases/download/${releaseTitle}`; - return url; -} - -export const url = buildUrl(); - -## Big picture - -This guide helps you install the `calicoctl` command line tool to manage {{prodname}} resources -and perform administrative functions. - -## Value - -The `calicoctl` command line tool is required to use many of {{prodname}}'s features. It -is used to manage {{prodname}} policies and configuration, as well as view detailed cluster status. - -## Concepts - -### API groups - -All Kubernetes resources belong to an API group. The API group is indicated by the resource's `apiVersion`. For example, {{prodname}} -uses resources in the `projectcalico.org/v3` API group for configuration, and the operator uses resources in the `operator.tigera.io/v1` API group. - -You can read more about API groups in [the Kubernetes documentation](https://kubernetes.io/docs/reference/using-api/#api-groups). - -### calicoctl and kubectl - -To manage {{prodname}} APIs in the `projectcalico.org/v3` API group, you should use `calicoctl`. This is because -`calicoctl` provides important validation and defaulting for these resources that is not available in `kubectl`. However, `kubectl` -should still be used to manage other Kubernetes resources. - -:::note - -If you would like to use `kubectl` to manage `projectcalico.org/v3` API resources, you can use the -[Calico API server](../install-apiserver.mdx). - -::: - -:::caution - -Never modify resources in the `crd.projectcalico.org` API group directly. These are internal data representations -and modifying them directly may result in unexpected behavior. -In addition to resource management, `calicoctl` also enables other {{prodname}} administrative tasks such as viewing IP pool utilization -and BGP status. - -::: - -### Datastore - -{{prodname}} objects are stored in one of two datastores, either etcd or Kubernetes. The choice of datastore is determined at the time {{prodname}} -is installed. Typically for Kubernetes installations the Kubernetes datastore is the default. - -You can run `calicoctl` on any host with network access to the {{prodname}} datastore as either a binary or a container. -For step-by-step instructions, refer to the section that corresponds to your desired deployment. - - - - - -## How to - -:::note - -Make sure you always install the version of `calicoctl` that matches the version of {{prodname}} running on your cluster. - -::: - -- [Install calicoctl as a binary on a single host](#install-calicoctl-as-a-binary-on-a-single-host) -- [Install calicoctl as a kubectl plugin on a single host](#install-calicoctl-as-a-kubectl-plugin-on-a-single-host) -- [Install calicoctl as a container on a single host](#install-calicoctl-as-a-container-on-a-single-host) - -### Install calicoctl as a binary on a single host - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-linux-amd64 -o calicoctl - -1. Set the file to be executable. - - ```bash - chmod +x ./calicoctl - ``` - - :::note - - If the location of `calicoctl` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. 
This will allow you to invoke it - without having to prepend its location. - - ::: - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-darwin-amd64 -o calicoctl - -1. Set the file to be executable. - - ```bash - chmod +x calicoctl - ``` - - :::note - - If you are faced with `cannot be opened because the developer cannot be verified` error when using `calicoctl` for the first time. - go to `Applications > System Preferences > Security & Privacy` in the `General` tab at the bottom of the window click `Allow anyway`. - - ::: - - :::note - - If the location of `calicoctl` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This will allow you to invoke it - without having to prepend its location. - - ::: - - - - -1. Use the following PowerShell command to download the `calicoctl` binary. - - :::tip - - Consider running PowerShell as administrator and navigating - to a location that's in your `PATH`. For example, `C:\Windows`. - - ::: - -Invoke-WebRequest -Uri "{url}/calicoctl-windows-amd64.exe" -OutFile "calicoctl.exe" - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-linux-ppc64le -o calicoctl - -1. Set the file to be executable. - - ```bash - chmod +x calicoctl - ``` - - :::note - - If the location of `calicoctl` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This will allow you to invoke it - without having to prepend its location. - - ::: - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-linux-arm64 -o calicoctl - -1. Set the file to be executable. - - ```bash - chmod +x calicoctl - ``` - - :::note - - If the location of `calicoctl` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This will allow you to invoke it - without having to prepend its location. - - ::: - - - - -### Install calicoctl as a kubectl plugin on a single host - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-linux-amd64 -o kubectl-calico - -1. Set the file to be executable. - - ```bash - chmod +x kubectl-calico - ``` - - :::note - - If the location of `kubectl-calico` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This is required in order for - kubectl to detect the plugin and allow you to use it. - - ::: - - - - -1. 
Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-darwin-amd64 -o kubectl-calico - -1. Set the file to be executable. - - ```bash - chmod +x kubectl-calico - ``` - - :::note - - If you are faced with `cannot be opened because the developer cannot be verified` error when using `calicoctl` for the first time. - go to `Applications > System Preferences > Security & Privacy` in the `General` tab at the bottom of the window click `Allow anyway`. - - > If the location of `kubectl-calico` is not already in your `PATH`, move the file - > to one that is or add its location to your `PATH`. This is required in order for - > kubectl to detect the plugin and allow you to use it. - - ::: - - - - -1. Use the following PowerShell command to download the `calicoctl` binary. - - :::tip - - Consider running PowerShell as administrator and navigating - to a location that's in your `PATH`. For example, `C:\Windows`. - - ::: - -Invoke-WebRequest -Uri "{url}/calicoctl-windows-amd64.exe" -OutFile kubectl-calico.exe - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-linux-ppc64le -o kubectl-calico - -1. Set the file to be executable. - - ```bash - chmod +x kubectl-calico - ``` - - :::note - - If the location of `kubectl-calico` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This is required in order for - kubectl to detect the plugin and allow you to use it. - - ::: - - - - -1. Log into the host, open a terminal prompt, and navigate to the location where - you want to install the binary. - - :::tip - - Consider navigating to a location that's in your `PATH`. For example, - `/usr/local/bin/`. - - ::: - -1. Use the following command to download the `calicoctl` binary. - - curl -L {url}/calicoctl-linux-arm64 -o kubectl-calico - -1. Set the file to be executable. - - ```bash - chmod +x kubectl-calico - ``` - - :::note - - If the location of `kubectl-calico` is not already in your `PATH`, move the file - to one that is or add its location to your `PATH`. This is required in order for - kubectl to detect the plugin and allow you to use it. - - ::: - - - - -Verify the plugin works. - -``` -kubectl calico -h -``` - -You can now run any `calicoctl` subcommands through `kubectl calico`. - -:::note - -If you run these commands from your local machine (instead of a host node), some of -the node related subcommands will not work (like node status). - -::: - -### Install calicoctl as a container on a single host - -To install `calicoctl` as a container on a single host, log into the -target host and issue the following command. - -```bash -docker pull {{registry}}{{imageNames.calicoctl}}:{{releaseTitle}} -``` - -**Next step**: - -[Configure `calicoctl` to connect to your datastore](configure/index.mdx). 
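-
-As a quick sanity check before configuring datastore access, you can confirm the
-binary runs and reports the expected version (the client version shown should match
-the {{prodname}} version running on your cluster):
-
-```bash
-calicoctl version
-```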
diff --git a/calico_versioned_docs/version-3.25/operations/certificate-management.mdx b/calico_versioned_docs/version-3.25/operations/certificate-management.mdx deleted file mode 100644 index 6831aa2c76..0000000000 --- a/calico_versioned_docs/version-3.25/operations/certificate-management.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -description: Control the issuer of certificates used by Calico ---- - -# Manage TLS certificates used by Calico - -## Big picture - -Enable custom workflows for issuing and signing certificates used to secure communication between {{prodname}} components. - -## Value - -Some deployments have security requirements that strictly minimize or eliminate the access to private keys and/or -requirements to control the trusted certificates throughout clusters. Using the Kubernetes Certificates API that automates -certificate issuance, {{prodname}} provides a simple configuration option that you add to your installation. - -## Before you begin - -**Supported algorithms** - -- Private Key Pair: RSA (size: 2048, 4096, 8192), ECDSA (curve: 256, 384, 521) -- Certificate Signature: RSA (sha: 256, 384, 512), ECDSA (sha: 256, 384, 512) - -## How to - -- [Enable certificate management](#enable-certificate-management) -- [Verify and monitor](#verify-and-monitor) -- [Implement your own signing/approval process](#implement-your-own-signing-and-approval-process) - -### Enable certificate management - -1. Modify your [the installation resource](../reference/installation/api.mdx#operator.tigera.io/v1.Installation) - resource and add the `certificateManagement` section. Apply the following change to your cluster. - -```yaml -apiVersion: operator.tigera.io/v1 -kind: Installation -metadata: - name: default -spec: - certificateManagement: - caCert: - signerName: / - signatureAlgorithm: SHA512WithRSA - keyAlgorithm: RSAWithSize4096 -``` - -Done! If you have an automatic signer and approver, there is nothing left to do. The next section explains in more detail -how to verify and monitor the status. - -### Verify and monitor - -1. Monitor your pods as they come up: - -``` -kubectl get pod -n calico-system -w -NAMESPACE NAME READY STATUS RESTARTS AGE -calico-system calico-node-5ckvq 0/1 Pending 0 0s -calico-system calico-typha-688c9957f5-h9c5w 0/1 Pending 0 0s -calico-system calico-node-5ckvq 0/1 Init:0/3 0 1s -calico-system calico-typha-688c9957f5-h9c5w 0/1 Init:0/1 0 1s -calico-system calico-node-5ckvq 0/1 PodInitializing 0 2s -calico-system calico-typha-688c9957f5-h9c5w 0/1 PodInitializing 0 2s -calico-system calico-node-5ckvq 1/1 Running 0 3s -calico-system calico-typha-688c9957f5-h9c5w 1/1 Running 0 3s -``` - -During the `Init` phase a certificate signing request (CSR) is created by an init container of the pod. It will be stuck in the -`Init` phase. Once the CSR has been approved and signed by the certificate authority, the pod continues with `PodInitializing` -and eventually `Running`. - -1. 
Monitor certificate signing requests: - -``` -kubectl get csr -w -NAME AGE REQUESTOR CONDITION -calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Pending -calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Pending,Issued -calico-system:calico-node-5ckvq:9a3a10 0s system:serviceaccount:calico-system:calico-node Approved,Issued -calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Pending -calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Pending,Issued -calico-system:typha-688c9957f5-h9c5w:2b0d82 0s system:serviceaccount:calico-system:calico-typha Approved,Issued -``` - -A CSR will be `Pending` until it has been `Issued` and `Approved`. The name of a CSR is based on the namespace, the pod -name and the first 6 characters of the pod's UID. The pod will be `Pending` until the CSR has been `Approved`. - -1. Monitor the status of this feature using the `TigeraStatus`: - -``` -kubectl get tigerastatus -NAME AVAILABLE PROGRESSING DEGRADED SINCE -calico True False False 2m40s -``` - -### Implement your own signing and approval process - -**Required steps** - -This feature uses api version `certificates.k8s.io/v1beta1` for [certificate signing requests](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/). -To automate the signing and approval process, run a server that performs the following actions: - -1. Watch `CertificateSigningRequests` resources with status `Pending` and `spec.signerName=`. - - :::note - - You can skip this step if you are using a version before Kubernetes v1.18; (the signerName field was not available). - - ::: - -1. For each `Pending` CSR perform (security) checks (see next heading) -1. Issue a certificate and update `.spec.status.certificate` -1. Approve the CSR and update `.spec.status.conditions` - -**Security requirements** - -Based on your requirements you may want to implement custom checks to make sure that no certificates are issued for a malicious user. -When a CSR is created, the kube-apiserver adds immutable fields to the spec to help you perform checks: - -- `.spec.username`: username of the requester -- `.spec.groups`: user groups of the requester -- `.spec.request`: certificate request in pem format - -Verify that the user and/or group match with the requested certificate subject (alt) names. 
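-
-For example, the following sketch (reusing the CSR name from the example output
-above) shows who created a pending request and which subject it asks for, so the
-two can be compared:
-
-```bash
-# Show the requester recorded by the kube-apiserver (immutable).
-kubectl get csr calico-system:calico-node-5ckvq:9a3a10 -o jsonpath='{.spec.username}'
-
-# Decode the PEM-encoded request and inspect the requested subject and SANs.
-kubectl get csr calico-system:calico-node-5ckvq:9a3a10 -o jsonpath='{.spec.request}' \
-  | base64 -d \
-  | openssl req -noout -subject -text
-```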
- -**Implement your signer and approver using golang** - -- Use [client-go](https://github.com/kubernetes/client-go) to create a clientset -- To watch CSRs, use `clientset.CertificatesV1().CertificateSigningRequests().Watch(..)` -- To issue the certificate use `clientset.CertificatesV1().CertificateSigningRequests().UpdateStatus(...)` -- To approve the CSR use `clientset.CertificatesV1().CertificateSigningRequests().UpdateApproval(...)` - -### Additional resources - -- Read [kubernetes certificate signing requests](https://kubernetes.io/docs/reference/access-authn-authz/certificate-signing-requests/) for more information on CSRs -- Use [client-go](https://github.com/kubernetes/client-go) to implement a controller to sign and approve a CSR diff --git a/calico_versioned_docs/version-3.25/operations/datastore-migration.mdx b/calico_versioned_docs/version-3.25/operations/datastore-migration.mdx deleted file mode 100644 index ca16821038..0000000000 --- a/calico_versioned_docs/version-3.25/operations/datastore-migration.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -description: Migrate your cluster from using an etcdv3 datastore to a Kubernetes datastore. ---- - -# Migrate Calico data from an etcdv3 datastore to a Kubernetes datastore - -## Big picture - -Switch your {{prodname}} datastore from etcdv3 to Kubernetes on a live cluster. - -## Value - -Using Kubernetes as your datastore provides a number of benefits over using etcdv3 -directly, including fewer components and better support for role based access -control. For most users, using the Kubernetes data store will provide a better -experience. We provide a seamless way to migrate your data from an existing -cluster with an etcdv3 datastore to a Kubernetes datastore. For the -complete set of advantages of using a Kubernetes datastore over an etcd datastore, see -[{{prodname}} Datastore](../getting-started/kubernetes/hardway/the-calico-datastore.mdx#using-kubernetes-as-the-datastore) -documentation. - -## Before you begin - -- Ensure that your Calico installation is configured to use an etcdv3 datastore. This guide does not apply to clusters installed using the Kubernetes API datastore. - -- The **latest version of calicoctl** must be [installed and configured to access etcd](calicoctl/install.mdx). - - :::note - - Since the steps below require modifying calicoctl configuration, we do not recommend using calicoctl installed - as a Kubernetes pod for this procedure. Instead, install the binary directly on a host with access to etcd and the Kubernetes API. - - ::: - -## How To - -### Migrate the datastore - -To migrate contents of the datastore, we will be using the `calicoctl datastore migrate` -command and subcommands. For more information, see the -[calicoctl datastore migrate](../reference/calicoctl/datastore/migrate/overview.mdx) -documentation. - -1. Lock the etcd datastore for migration. This prevents any changes to the data from - affecting the cluster. - - ``` - calicoctl datastore migrate lock - ``` - - :::note - - After running the above command, you cannot make changes to the configuration of your cluster until the - migration is complete. New pods will not be started until after the migration. - - ::: - -1. Export the datastore contents to a file. - - ``` - calicoctl datastore migrate export > etcd-data - ``` - -1. Configure `calicoctl` to access the - [Kubernetes datastore](calicoctl/configure/kdd.mdx). - -1. Import the datastore contents from your exported file. - - ``` - calicoctl datastore migrate import -f etcd-data - ``` - -1. 
Verify that the datastore was properly imported. This can be accomplished by using - `calicoctl` to query for any {{prodname}} resources that exist in the etcd - datastore (e.g. networkpolicy). - - ``` - calicoctl get networkpolicy - ``` - -1. Configure {{prodname}} to read from the Kubernetes datastore. Follow the - directions to install {{prodname}} with the Kubernetes datastore. The - installation instructions contain the relevant version of the - `calico.yaml` file to apply. - - ``` - kubectl apply -f calico.yaml - ``` - -1. Wait for Calico to perform a rolling update before continuing by monitoring the following command - - ``` - kubectl rollout status daemonset calico-node -n kube-system - ``` - -1. Unlock the datastore. This allows the {{prodname}} resources to affect the cluster again. - - ``` - calicoctl datastore migrate unlock - ``` - - :::note - - Once the Kubernetes datastore is unlocked, the datastore migration - cannot be rolled back. Make sure that the Kubernetes datastore is populated with - all of the expected {{prodname}} resources prior to unlocking the datastore. - - ::: - -### Roll back the datastore migration - -Rolling back the datastore migration can only be done if the original etcd datastore still exists -and the Kubernetes datastore was not unlocked after the datastore resources were imported. The -following steps delete the {{prodname}} resources imported into the Kubernetes datastore and -configure the cluster to once again read from the original etcd datastore. - -1. Lock the Kubernetes datastore. - - ``` - calicoctl datastore migrate lock - ``` - -1. Delete all of the {{prodname}} CRDs. This will remove all of the data imported into - the Kubernetes datastore. - - ``` - kubectl delete $(kubectl get crds -o name | grep projectcalico.org) - ``` - -1. Configure {{prodname}} to read from the etcd datastore. Follow the - directions to install {{prodname}} with the etcd datastore. The - installation instructions contain the relevant version of the - `calico.yaml` file to apply. - - ``` - kubectl apply -f calico.yaml - ``` - -1. Configure `calicoctl` to access the - [etcd datastore](calicoctl/configure/etcd.mdx). - -1. Unlock the etcd datastore. This allows the {{prodname}} resources to affect the cluster again. - - ``` - calicoctl datastore migrate unlock - ``` diff --git a/calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx b/calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx deleted file mode 100644 index bdc0a56f94..0000000000 --- a/calico_versioned_docs/version-3.25/operations/decommissioning-a-node.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -description: Manually remove a node from a cluster that is installed with Calico. ---- - -# Decommission a node - -## About decommissioning nodes - -If you are running the [node controller](../reference/kube-controllers/configuration.mdx) -or using the Kubernetes API datastore in policy-only mode, you do not need to manually decommission nodes. - -In other configurations, you may need to manually decommission a node for one -of the following reasons. - -- You are decommissioning a host running `{{nodecontainer}}` or removing it from your - cluster. -- You are renaming a node. -- You are receiving an error about an IP address already in use. -- Readiness checks are failing due to unreachable peers that are no longer in the - cluster. -- Hosts are regularly added and removed from your cluster. 
- -## Purpose of this page - -Provide guidance on how to remove a host that is part of a {{prodname}} cluster -and clean up the associated [node resource reference](../reference/resources/node.mdx) -information. - -## Prerequisites - -- Prior to removing any Node resource from the datastore the `{{nodecontainer}}` - container should be stopped on the corresponding host and it should be - ensured that it will not be restarted. -- You must have [calicoctl configured](calicoctl/install.mdx) and operational to run - the commands listed here. - -## Removing a node resource - -Removing a Node resource will also remove the Workload Endpoint, Host -Endpoint, and IP Address resources and any other sub configuration items -associated with that Node. - -:::note - -- Deleting a Node resource may be service impacting if the host is still in - service. Ensure that the host is no longer in service before deleting the - Node resource. -- Any configuration specific to the node will be removed. This would be - configuration like node BGP peerings or custom Felix configs. - -::: - -## Removing a single node resource - -See the example below for how to remove a node with the calicoctl command. - -See [Removing a Node resource](#removing-a-node-resource) above. - -```bash -calicoctl delete node -``` - -## Removing multiple node resources - -To remove several Nodes, a file can be created with several Node resources and -then be passed to the `calicoctl delete` command with the `-f` flag. -Below is an example of how to create a file of Nodes and delete them. - -1. Create a file with the [Node resources](../reference/resources/node.mdx) that need - to be removed. For example: - - ```yaml - - apiVersion: projectcalico.org/v3 - kind: Node - metadata: - name: node-02 - - apiVersion: projectcalico.org/v3 - kind: Node - metadata: - name: node-03 - ``` - -2. To delete the nodes listed in the file pass it like below. - - :::caution - - See [Removing a Node resource](#removing-a-node-resource) above. - - ::: - - ```bash - calicoctl delete -f nodes_to_delete.yaml - ``` - diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx deleted file mode 100644 index f65fd76972..0000000000 --- a/calico_versioned_docs/version-3.25/operations/ebpf/enabling-ebpf.mdx +++ /dev/null @@ -1,381 +0,0 @@ ---- -description: Step-by-step instructions for enabling the eBPF dataplane. ---- - -# Enable the eBPF dataplane - -import EbpfValue from '@site/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx'; - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -## Big picture - -Enable the eBPF dataplane on an existing cluster. - -## Value - - - -## Concepts - -### eBPF - -eBPF (or "extended Berkeley Packet Filter"), is a technology that allows safe mini programs to be attached to various low-level hooks in the Linux kernel. eBPF has a wide variety of uses, including networking, security, and tracing. You’ll see a lot of non-networking projects leveraging eBPF, but for {{prodname}} our focus is on networking, and in particular, pushing the networking capabilities of the latest Linux kernels to the limit. 
- -## Before you begin - -### Supported - -- x86-64 -- ARM64 (community supported, not actively regression tested by the {{prodname}} team) - -- Distributions: - - - Generic or kubeadm - - kOps - - OpenShift - - EKS - - AKS with limitations: - - [AKS with Azure CNI and Calico network policy](../../getting-started/kubernetes/managed-public-cloud/aks.mdx#install-aks-with-calico-for-network-policy) works, but it is not possible to disable kube-proxy resulting in wasted resources and suboptimal performance. - - [AKS with {{prodname}} networking](../../getting-started/kubernetes/managed-public-cloud/aks.mdx#install-aks-with-calico-networking) is in testing with the eBPF dataplane. This should be a better solution overall but, at time of writing, the testing was not complete. - - RKE (RKE2 recommended because it supports disabling `kube-proxy`) - -- Linux distribution/kernel: - - - Ubuntu 20.04. - - Red Hat v8.2 with Linux kernel v4.18.0-193 or above (Red Hat have backported the required features to that build). - - Another [supported distribution](../../getting-started/kubernetes/requirements.mdx) with Linux kernel v5.3 or above. Kernel v5.8 or above with CO-RE enabled is recommended for better performance. - -- An underlying network fabric that allows VXLAN traffic between hosts. In eBPF mode, VXLAN is used to forward Kubernetes NodePort traffic. - -### Not supported - -- Other processor architectures. - -- Distributions: - - - GKE. This is because of an incompatibility with the GKE CNI plugin. - -- Clusters with some eBPF nodes and some standard dataplane and/or Windows nodes. -- IPv6. -- Floating IPs. -- SCTP (either for policy or services). This is due to lack of kernel support for the SCTP checksum in BPF. -- `Log` action in policy rules. This is because the `Log` action maps to the iptables `LOG` action and BPF programs cannot access that log. -- VLAN-based traffic. - -### Performance - -For best pod-to-pod performance, we recommend using an underlying network that doesn't require Calico to use an overlay. For example: - -- A cluster within a single AWS subnet. -- A cluster using a compatible cloud provider's CNI (such as the AWS VPC CNI plugin). -- An on-prem cluster with BGP peering configured. - -If you must use an overlay, we recommend that you use VXLAN, not IPIP. VXLAN has much better performance than IPIP in -eBPF mode due to various kernel optimisations. - -## How to - -- [Verify that your cluster is ready for eBPF mode](#verify-that-your-cluster-is-ready-for-ebpf-mode) -- [Configure {{prodname}} to talk directly to the API server](#configure-calico-to-talk-directly-to-the-api-server) -- [Configure kube-proxy](#configure-kube-proxy) -- [Enable eBPF mode](#enable-ebpf-mode) -- [Try out DSR mode](#try-out-dsr-mode) -- [Reversing the process](#reversing-the-process) - -### Verify that your cluster is ready for eBPF mode - -This section explains how to make sure your cluster is suitable for eBPF mode. - -To check that the kernel on a node is suitable, you can run - -```bash -uname -rv -``` - -The output should look like this: - -``` -5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 -``` - -In this case the kernel version is v5.4, which is suitable. - -On Red Hat-derived distributions, you may see something like this: - -``` -4.18.0-193.el8.x86_64 (mockbuild@x86-vm-08.build.eng.bos.redhat.com) -``` - -Since the Red Hat kernel is v4.18 with at least build number 193, this kernel is suitable. 
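-
-To check every node at once instead of logging in to each one, you can read the
-kernel version from the node status (a convenience sketch using standard
-Kubernetes node fields):
-
-```bash
-# Lists each node together with the kernel version reported by its kubelet.
-kubectl get nodes -o custom-columns='NODE:.metadata.name,KERNEL:.status.nodeInfo.kernelVersion'
-```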
- -### Configure {{prodname}} to talk directly to the API server - -In eBPF mode, {{prodname}} implements Kubernetes service networking directly (rather than relying on `kube-proxy`). -Of course, this makes it highly desirable to disable `kube-proxy` when running in eBPF mode to save resources -and avoid confusion over which component is handling services. - -To be able to disable `kube-proxy`, {{prodname}} needs to communicate to the API server _directly_ rather than -going through `kube-proxy`. To make _that_ possible, we need to find a persistent, static way to reach the API server. -The best way to do that varies by Kubernetes distribution: - -- If you created a cluster manually (for example by using `kubeadm`) then the right address to use depends on whether you - opted for a high-availability cluster with multiple API servers or a simple one-node API server. - - - If you opted to set up a high availability cluster then you should use the address of the load balancer that you - used in front of your API servers. As noted in the Kubernetes documentation, a load balancer is required for a - HA set-up but the precise type of load balancer is not specified. - - If you opted for a single control plane node then you can use the address of the control plane node itself. However, - it's important that you use a _stable_ address for that node such as a dedicated DNS record, or a static IP address. - If you use a dynamic IP address (such as an EC2 private IP) then the address may change when the node is restarted - causing {{prodname}} to lose connectivity to the API server. - -- `kops` typically sets up a load balancer of some sort in front of the API server. You should use - the FQDN and port of the API load balancer, for example `api.internal.` as the `KUBERNETES_SERVICE_HOST` - below and 443 as the `KUBERNETES_SERVICE_PORT`. -- OpenShift requires various DNS records to be created for the cluster; one of these is exactly what we need: - `api-int..` should point to the API server or to the load balancer in front of the - API server. Use that (filling in the `` and `` as appropriate for your cluster) for the - `KUBERNETES_SERVICE_HOST` below. Openshift uses 6443 for the `KUBERNETES_SERVICE_PORT`. -- For AKS and EKS clusters you should use the FQDN of the API server's load balancer. This can be found with - ``` - kubectl cluster-info - ``` - which gives output like the following: - ``` - Kubernetes master is running at https://60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com - ... - ``` - In this example, you would use `60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com` for - `KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map. -- MKE and Rancher neither allow `kube-proxy` to be disabled nor provide a stable address for the - API server that is suitable for the next step. The best option on these platforms is to - - - Let {{prodname}} connect to the API server as through `kube-proxy` (by skipping the step below to create the - `kubernetes-services-endpoint` config map). - - Then, follow the instructions in [Avoiding conflicts with kube-proxy](#avoiding-conflicts-with-kube-proxy) below, - or connectivity will fail when eBPF mode is enabled. 
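-
-If you are unsure which address your cluster is currently using, you can read it
-from your kubeconfig (a sketch; per the guidance above, make sure the address is a
-stable one before using it in the config map below):
-
-```bash
-# Prints the API server URL for the current kubeconfig context.
-kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}'
-```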
-
-**The next step depends on whether you installed {{prodname}} using the operator, or a manifest:**
-
-<Tabs>
-<TabItem value="operator" label="Operator">
-
-If you installed {{prodname}} using the operator, create the following config map in the `tigera-operator` namespace using the host and port determined above:
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: kubernetes-services-endpoint
-  namespace: tigera-operator
-data:
-  KUBERNETES_SERVICE_HOST: "<API server host>"
-  KUBERNETES_SERVICE_PORT: "<API server port>"
-```
-
-The operator will pick up the change to the config map automatically and do a rolling update of {{prodname}} to pass on the change. Confirm that pods restart and then reach the `Running` state with the following command:
-
-```
-watch kubectl get pods -n calico-system
-```
-
-If you do not see the pods restart then it's possible that the `ConfigMap` wasn't picked up (sometimes Kubernetes is slow to propagate `ConfigMap`s; see Kubernetes [issue #30189](https://github.com/kubernetes/kubernetes/issues/30189)). You can try restarting the operator.
-
-</TabItem>
-<TabItem value="manifest" label="Manifest">
-
-If you installed {{prodname}} using a manifest, create the following config map in the `kube-system` namespace using the host and port determined above:
-
-```yaml
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: kubernetes-services-endpoint
-  namespace: kube-system
-data:
-  KUBERNETES_SERVICE_HOST: "<API server host>"
-  KUBERNETES_SERVICE_PORT: "<API server port>"
-```
-
-Wait 60s for kubelet to pick up the `ConfigMap` (see Kubernetes [issue #30189](https://github.com/kubernetes/kubernetes/issues/30189)); then, restart the {{prodname}} pods to pick up the change:
-
-```
-kubectl delete pod -n kube-system -l k8s-app=calico-node
-kubectl delete pod -n kube-system -l k8s-app=calico-kube-controllers
-```
-
-And, if using Typha:
-
-```
-kubectl delete pod -n kube-system -l k8s-app=calico-typha
-```
-
-Confirm that pods restart and then reach the `Running` state with the following command:
-
-```
-watch "kubectl get pods -n kube-system | grep calico"
-```
-
-You can verify that the change was picked up by checking the logs of one of the {{nodecontainer}} pods.
-
-```
-kubectl get po -n kube-system -l k8s-app=calico-node
-```
-
-Should show one or more pods:
-
-```
-NAME                    READY   STATUS    RESTARTS   AGE
-{{noderunning}}-d6znw   1/1     Running   0          48m
-...
-```
-
-Then, to search the logs, choose a pod and run:
-
-```
-kubectl logs -n kube-system <pod name> | grep KUBERNETES_SERVICE_HOST
-```
-
-You should see the following log, with the correct `KUBERNETES_SERVICE_...` values.
-
-```
-2020-08-26 12:26:29.025 [INFO][7] daemon.go 182: Kubernetes server override env vars. KUBERNETES_SERVICE_HOST="172.16.101.157" KUBERNETES_SERVICE_PORT="6443"
-```
-
-</TabItem>
-</Tabs>
-
-### Configure kube-proxy
-
-In eBPF mode {{prodname}} replaces `kube-proxy`, so it wastes resources (and reduces performance) to run both.
-This section explains how to disable `kube-proxy` in some common environments.
-
-#### Clusters that run `kube-proxy` with a `DaemonSet` (such as `kubeadm`)
-
-For a cluster that runs `kube-proxy` in a `DaemonSet` (such as a `kubeadm`-created cluster), you can disable `kube-proxy` reversibly by adding a node selector to `kube-proxy`'s `DaemonSet` that matches no nodes, for example:
-
-```
-kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
-```
-
-Then, should you want to start `kube-proxy` again, you can simply remove the node selector.
-
-:::note
-
-This approach is not suitable for AKS with Azure CNI since that platform makes use of the Kubernetes add-on manager;
-the change will be reverted by the system. For AKS, you should follow [Avoiding conflicts with kube-proxy](#avoiding-conflicts-with-kube-proxy)
-below.
-
-:::
-
-#### OpenShift
-
-If you are running OpenShift, you can disable `kube-proxy` as follows:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": false}}'
-```
-
-To re-enable it:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": true}}'
-```
-
-### Avoiding conflicts with kube-proxy
-
-If you cannot disable `kube-proxy` (for example, because it is managed by your Kubernetes distribution), then you _must_ change the Felix configuration parameter `BPFKubeProxyIptablesCleanupEnabled` to `false`. This can be done with `kubectl` as follows:
-
-```
-kubectl patch felixconfiguration default --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
-```
-
-If both `kube-proxy` and `BPFKubeProxyIptablesCleanupEnabled` are enabled, then `kube-proxy` will write its iptables rules and Felix will try to clean them up, resulting in iptables flapping between the two.
-
-### Enable eBPF mode
-
-**The next step depends on whether you installed {{prodname}} using the operator, or a manifest:**
-
-<Tabs>
-<TabItem value="operator" label="Operator">
-
-To enable eBPF mode, change the `spec.calicoNetwork.linuxDataplane` parameter in the operator's `Installation` resource to `"BPF"`.
-
-```bash
-kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF"}}}'
-```
-
-:::note
-
-The operator rolls out the change with a rolling update, which means that some nodes will be in eBPF mode
-before others. This can disrupt the flow of traffic through node ports. We plan to improve this in an upcoming release
-by having the operator do the update in two phases.
-
-:::
-
-</TabItem>
-<TabItem value="manifest" label="Manifest">
-
-If you installed {{prodname}} using a manifest, change the Felix configuration parameter `BPFEnabled` to `true`. This can be done with `calicoctl`, as follows:
-
-```
-calicoctl patch felixconfiguration default --patch='{"spec": {"bpfEnabled": true}}'
-```
-
-</TabItem>
-</Tabs>
-
-When enabling eBPF mode, preexisting connections continue to use the non-BPF datapath; such connections should
-not be disrupted, but they do not benefit from eBPF mode's advantages.
-
-### Try out DSR mode
-
-Direct return mode skips a hop through the network for traffic to services (such as node ports) from outside the cluster. This reduces latency and CPU overhead, but it requires the underlying network to allow nodes to send traffic with each other's IPs. In AWS, this requires all your nodes to be in the same subnet and for the source/dest check to be disabled.
-
-DSR mode is disabled by default; to enable it, set the `BPFExternalServiceMode` Felix configuration parameter to `"DSR"`. This can be done with `calicoctl`:
-
-```
-calicoctl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "DSR"}}'
-```
-
-To switch back to tunneled mode, set the configuration parameter to `"Tunnel"`:
-
-```
-calicoctl patch felixconfiguration default --patch='{"spec": {"bpfExternalServiceMode": "Tunnel"}}'
-```
-
-Switching external traffic mode can disrupt in-progress connections.
-
-### Reversing the process
-
-To revert to standard Linux networking:
-
-1. (Depending on whether you installed Calico with the operator or with a manifest) reverse the changes to the operator's `Installation` or the `FelixConfiguration` resource:
-
-   <Tabs>
-   <TabItem value="operator" label="Operator">
-
-   ```bash
-   kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"Iptables"}}}'
-   ```
-
-   </TabItem>
-   <TabItem value="manifest" label="Manifest">
-
-   ```
-   calicoctl patch felixconfiguration default --patch='{"spec": {"bpfEnabled": false}}'
-   ```
-
-   </TabItem>
-   </Tabs>
-
-1. If you disabled `kube-proxy`, re-enable it (for example, by removing the node selector added above).
-
-   ```
-   kubectl patch ds -n kube-system kube-proxy --type merge -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": null}}}}}'
-   ```
-
-1. Since disabling eBPF mode is disruptive to existing connections, monitor existing workloads to make sure they re-establish any connections that were disrupted by the switch.
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/index.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/index.mdx
deleted file mode 100644
index 4350e69323..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Documentation for eBPF dataplane mode, including how to enable eBPF dataplane mode.
-hide_table_of_contents: true
----
-
-# eBPF dataplane mode
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/install.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/install.mdx
deleted file mode 100644
index d6a76aacda..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/install.mdx
+++ /dev/null
@@ -1,426 +0,0 @@
----
-description: Install Calico in eBPF mode.
----
-
-# Install in eBPF mode
-
-import EbpfValue from '@site/calico_versioned_docs/version-3.25/_includes/content/_ebpf-value.mdx';
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Install the eBPF dataplane during the initial installation of {{prodname}}.
-
-## Value
-
-<EbpfValue />
-
-## Concepts
-
-### eBPF
-
-eBPF (or "extended Berkeley Packet Filter") is a technology that allows safe mini programs to be attached to various
-low-level hooks in the Linux kernel. eBPF has a wide variety of uses, including networking, security, and tracing.
-You'll see a lot of non-networking projects leveraging eBPF, but for {{prodname}} our focus is on networking,
-and in particular, pushing the networking capabilities of the latest Linux kernels to the limit.
-
-## Before you begin
-
-### Supported
-
-- x86-64
-
-- Distributions:
-
-  - Generic or kubeadm
-  - kOps
-  - OpenShift
-  - EKS
-  - AKS
-
-- Linux distribution/kernel:
-
-  - Ubuntu 20.04.
-  - Red Hat v8.2 with Linux kernel v4.18.0-193 or above (Red Hat have backported the required features to that build).
-  - Another [supported distribution](../../getting-started/kubernetes/requirements.mdx) with Linux kernel v5.3 or above.
-
-- An underlying network fabric that allows VXLAN traffic between hosts. In eBPF mode, VXLAN is used to forward Kubernetes NodePort traffic.
-
-### Not supported
-
-- Other processor architectures.
-
-- Distributions:
-
-  - GKE. This is because of an incompatibility with the GKE CNI plugin.
-
-  - RKE: eBPF mode cannot be enabled at install time because RKE doesn't provide
-    a stable address for the API server. However, by following [these instructions](enabling-ebpf.mdx),
-    it can be enabled as a post-install step.
-
-- Clusters with some eBPF nodes and some standard dataplane and/or Windows nodes.
-- IPv6.
-- Host endpoint `doNotTrack` policy (other policy types are supported).
-- Floating IPs.
-- SCTP (either for policy or services).
-- `Log` action in policy rules.
-- Tagged VLAN devices.
-
-### Performance
-
-For best pod-to-pod performance, we recommend using an underlying network that doesn't require Calico to use an overlay. For example:
-
-- A cluster within a single AWS subnet.
-- A cluster using a compatible cloud provider's CNI (such as the AWS VPC CNI plugin).
-- An on-prem cluster with BGP peering configured.
-
-If you must use an overlay, we recommend that you use VXLAN, not IPIP. VXLAN has better performance than IPIP in
-eBPF mode due to various kernel optimisations.
-
-## How to
-
-To install in eBPF mode, we recommend using the Tigera Operator to install {{prodname}}, so these instructions
-use the operator. Installing {{prodname}} normally consists of the following stages, which are covered by the
-main installation guides:
-
-- Create a cluster suitable to run {{prodname}}.
-- Install the Tigera Operator (possibly via a Helm chart), and the associated Custom Resource Definitions.
-- Apply a set of Custom Resources to tell the operator what to install.
-- Wait for the operator to provision all the associated resources and report back via its status resource.
-
-Installing directly in eBPF mode is very similar; this guide explains the differences:
-
-- [Create a cluster](#create-a-suitable-cluster) suitable to run {{prodname}} **with the added requirement that the nodes must use a recent
-  enough kernel**.
-- [**Create a config map with the "real" address of the API server.**](#create-kubernetes-service-endpoint-config-map) This allows the operator to install {{prodname}}
-  with a direct connection to the API server so that it can take over from `kube-proxy`.
-- [Install the Tigera Operator](#install-the-tigera-operator) (possibly via a Helm chart), and the associated Custom Resource Definitions.
-- **[Download and tweak the installation Custom Resource](#tweak-and-apply-installation-custom-resources) to tell the operator to use eBPF mode.**
-- [Apply a set of Custom Resources](#tweak-and-apply-installation-custom-resources) to tell the operator what to install.
-- [Wait for the operator to provision all the associated resources and report back via its status resource](#monitor-the-progress-of-the-installation).
-- [Disable kube-proxy or avoid conflicts.](#disable-kube-proxy-or-avoid-conflicts)
-
-These steps are explained in more detail below.
-
-### Create a suitable cluster
-
-The basic requirement for eBPF mode is to have a recent-enough kernel (see [above](#supported)).
-
-Select the appropriate tab below for distribution-specific instructions:
-
-<Tabs>
-<TabItem value="kubeadm" label="kubeadm">
-
-`kubeadm` supports a number of base OSes; as long as the base OS chosen (such as Ubuntu 20.04) meets the kernel
-requirements, `kubeadm`-provisioned clusters are supported.
-
-Since `kube-proxy` is not required in eBPF mode, you may wish to disable `kube-proxy` at install time. With `kubeadm`
-you can do that by passing the `--skip-phases=addon/kube-proxy` flag to `kubeadm init`:
-
-```
-kubeadm init --skip-phases=addon/kube-proxy
-```
-
-</TabItem>
-<TabItem value="kops" label="kOps">
-
-`kops` supports a number of base OSes; as long as the base OS chosen (such as Ubuntu 20.04 or RHEL 8.2) meets the kernel
-requirements, `kops`-provisioned clusters are supported.
-
-Since `kube-proxy` is not required in eBPF mode, you may wish to disable `kube-proxy` at install time.
With `kops` you
-can do that by setting the following in your `kops` configuration:
-
-```yaml
-kubeProxy:
-  enabled: false
-```
-
-</TabItem>
-<TabItem value="openshift" label="OpenShift">
-
-OpenShift supports a number of base OSes; as long as the base OS chosen has a recent enough kernel, OpenShift clusters are
-fully supported. Since Red Hat have backported the eBPF features required by {{prodname}}, the required Red Hat kernel
-version is lower than the mainline: v4.18.0-193 or above.
-
-</TabItem>
-<TabItem value="aks" label="AKS">
-
-Azure Kubernetes Service (AKS) supports a number of base OSes. The most recent Ubuntu 18.04 image has a recent enough
-kernel to use with eBPF mode.
-
-AKS does not support disabling `kube-proxy`, so it's necessary to tell {{prodname}} not to try to clean up
-`kube-proxy`'s iptables rules at a later stage.
-
-</TabItem>
-<TabItem value="eks" label="EKS">
-
-Amazon's Elastic Kubernetes Service (EKS) supports a number of base OSes for nodes. At the time of writing, the
-default kernel used by Amazon Linux is recent enough to run eBPF mode, as is the Bottlerocket kernel. The Ubuntu
-18.04 image did not have a recent-enough kernel (but that may have changed by the time you read this).
-
-</TabItem>
-</Tabs>
-
-### Create kubernetes-service-endpoint config map
-
-In eBPF mode, {{prodname}} takes over from `kube-proxy`. This means that, like `kube-proxy`, it needs to be able
-to reach the API server _directly_ rather than by using the API server's `ClusterIP`. To tell {{prodname}} how
-to reach the API server, we create a `ConfigMap` with the API server's "real" address. In this guide we do that before
-installing the Tigera Operator. That means that the operator itself can also use the direct connection and hence
-it doesn't require `kube-proxy` to be running.
-
-The tabs below explain how to find the "real" address of the API server for a range of distributions.
-**Note:** In all cases it's important that the address used is stable even if your API server is restarted or
-scaled up/down. If you have multiple API servers, with DNS or other load balancing in front, it's important to use
-the address of the load balancer. This prevents {{prodname}} from being disconnected if the API server's IP changes.
-
-<Tabs>
-<TabItem value="kubeadm" label="kubeadm">
-
-If you created a cluster manually (for example by using `kubeadm`) then the right address to use depends on whether you
-opted for a high-availability cluster with multiple API servers or a simple one-node API server.
-
-- If you opted to set up a high availability cluster then you should use the address of the load balancer that you
-  used in front of your API servers. As noted in the Kubernetes documentation, a load balancer is required for a
-  HA set-up but the precise type of load balancer is not specified.
-- If you opted for a single control plane node then you can use the address of the control plane node itself. However,
-  it's important that you use a _stable_ address for that node such as a dedicated DNS record, or a static IP address.
-  If you use a dynamic IP address (such as an EC2 private IP) then the address may change when the node is restarted,
-  causing {{prodname}} to lose connectivity to the API server.
-
-</TabItem>
-<TabItem value="kops" label="kOps">
-
-`kops` typically sets up a load balancer of some sort in front of the API server. You should use
-the FQDN and port of the API load balancer: `api.internal.<clustername>`.
-
-</TabItem>
-<TabItem value="openshift" label="OpenShift">
-
-OpenShift requires various DNS records to be created for the cluster; one of these is exactly what we need:
-`api.<cluster_name>.<base_domain>` should point to the API server or to the load balancer in front of the
-API server.
Use that (filling in the `<cluster_name>` and `<base_domain>` as appropriate for your cluster) for the
-`KUBERNETES_SERVICE_HOST` below. OpenShift uses 6443 for the `KUBERNETES_SERVICE_PORT`.
-
-</TabItem>
-<TabItem value="aks" label="AKS">
-
-For AKS clusters, you should use the FQDN of your API server. This can be found by running the following command:
-
-```
-kubectl cluster-info
-```
-
-which should give output similar to the following:
-
-```
-Kubernetes master is running at https://mycalicocl-calicodemorg-03a087-36558dbb.hcp.canadaeast.azmk8s.io:443
-```
-
-In this example, you would use `mycalicocl-calicodemorg-03a087-36558dbb.hcp.canadaeast.azmk8s.io` for
-`KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map.
-
-</TabItem>
-<TabItem value="eks" label="EKS">
-
-For an EKS cluster, it's important to use the domain name of the EKS-provided load balancer that is in front of the API
-server. This can be found by running the following command:
-
-```
-kubectl cluster-info
-```
-
-which should give output similar to the following:
-
-```
-Kubernetes master is running at https://60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com
-...
-```
-
-In this example, you would use `60F939227672BC3D5A1B3EC9744B2B21.gr7.us-west-2.eks.amazonaws.com` for
-`KUBERNETES_SERVICE_HOST` and `443` for `KUBERNETES_SERVICE_PORT` when creating the config map.
-
-</TabItem>
-</Tabs>
-
-Create the following config map in the `tigera-operator` namespace using the host and port determined above:
-
-```
-kubectl apply -f - <<EOF
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: kubernetes-services-endpoint
-  namespace: tigera-operator
-data:
-  KUBERNETES_SERVICE_HOST: "<API server host>"
-  KUBERNETES_SERVICE_PORT: "<API server port>"
-EOF
-```
-
-:::tip
-
-If you forget to create the config map before installing the operator, you can create it afterwards and
-then wait 60 seconds (for the config map to propagate) before restarting the operator:
-
-```
-kubectl delete pod -n tigera-operator -l k8s-app=tigera-operator
-```
-
-:::
-
-### Install the Tigera Operator
-
-Follow the steps in the main install guide for your platform to install the Tigera Operator (and possibly the
-Prometheus Operator). However, **stop** before applying the `custom-resources.yaml`; we'll customise that file
-to enable eBPF mode in the next step.
-
-### Tweak and apply installation Custom Resources
-
-When the main install guide tells you to apply the `custom-resources.yaml`, typically by running `kubectl create` with
-the URL of the file directly, you should instead download the file, so that you can edit it:
-
-```bash
-curl -o custom-resources.yaml {{manifestsUrl}}/manifests/custom-resources.yaml
-```
-
-Edit the file in your editor of choice and find the `Installation` resource, which should be at the top of the file.
-To enable eBPF mode, we need to add a new `calicoNetwork` section inside the `spec` of the Installation resource,
-including the `linuxDataplane` field. For EKS Bottlerocket OS only, you should also add the `flexVolumePath` setting
-as shown below.
-
-For example:
-
-```yaml
-# This section includes base {{prodname}} installation configuration.
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
-  name: default
-spec:
-  # Added calicoNetwork section with linuxDataplane field
-  calicoNetwork:
-    linuxDataplane: BPF
-
-  # EKS with Bottlerocket as node image only:
-  # flexVolumePath: /var/lib/kubelet/plugins
-
-  # ... remainder of the Installation resource varies by platform ...
-
-  # Install Calico Open Source
-  variant: Calico
-
-# ... remainder of the Installation resource varies by platform ...
-
----
-# This section configures the Calico API server.
-apiVersion: operator.tigera.io/v1
-kind: APIServer
-metadata:
-  name: default
-spec: {}
-```
-
-Then apply the edited file:
-
-```bash
-kubectl create -f custom-resources.yaml
-```
-
-:::tip
-
-If you already created the custom resources, you can switch your cluster over to eBPF mode by updating the
-installation resource. The operator will automatically roll out the change.
-
-```bash
-kubectl patch installation.operator.tigera.io default --type merge -p '{"spec":{"calicoNetwork":{"linuxDataplane":"BPF", "hostPorts":null}}}'
-```
-
-:::
-
-### Monitor the progress of the installation
-
-You can monitor progress of the installation with the following command:
-
-```bash
-watch kubectl get tigerastatus
-```
-
-### Disable `kube-proxy` (or avoid conflicts)
-
-In eBPF mode, to avoid conflicts with `kube-proxy` it's necessary to either disable `kube-proxy` or to configure
-{{prodname}} not to clean up `kube-proxy`'s iptables rules. If you didn't disable `kube-proxy` when starting
-your cluster, then follow the steps below to avoid conflicts:
-
-<Tabs>
-<TabItem value="kubeadm" label="kubeadm">
-
-For a cluster that runs `kube-proxy` in a `DaemonSet` (such as a `kubeadm`-created cluster), you can disable
-`kube-proxy`, reversibly, by adding a node selector to `kube-proxy`'s `DaemonSet` that matches no nodes, for example:
-
-```
-kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
-```
-
-Then, should you want to start `kube-proxy` again, you can simply remove the node selector.
-
-</TabItem>
-<TabItem value="kops" label="kOps">
-
-`kops` allows `kube-proxy` to be disabled by setting
-
-```yaml
-kubeProxy:
-  enabled: false
-```
-
-in its configuration. You will need to do `kops update cluster` to roll out the change.
-
-</TabItem>
-<TabItem value="openshift" label="OpenShift">
-
-In OpenShift, you can disable `kube-proxy` as follows:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": false}}'
-```
-
-If you need to re-enable it later:
-
-```
-kubectl patch networks.operator.openshift.io cluster --type merge -p '{"spec":{"deployKubeProxy": true}}'
-```
-
-</TabItem>
-<TabItem value="aks" label="AKS">
-
-AKS with Azure CNI does not allow `kube-proxy` to be disabled; `kube-proxy` is deployed by the add-on manager, which will reconcile
-away any manual changes made to its configuration. To ensure `kube-proxy` and {{prodname}} don't fight, set
-the Felix configuration parameter `bpfKubeProxyIptablesCleanupEnabled` to `false`. This can be done with
-`kubectl` as follows:
-
-```
-kubectl patch felixconfiguration default --type merge --patch='{"spec": {"bpfKubeProxyIptablesCleanupEnabled": false}}'
-```
-
-</TabItem>
-<TabItem value="eks" label="EKS">
-
-In EKS, you can disable `kube-proxy`, reversibly, by adding a node selector that doesn't match any nodes to
-`kube-proxy`'s `DaemonSet`, for example:
-
-```
-kubectl patch ds -n kube-system kube-proxy -p '{"spec":{"template":{"spec":{"nodeSelector":{"non-calico": "true"}}}}}'
-```
-
-Then, should you want to start `kube-proxy` again, you can simply remove the node selector.
-
-</TabItem>
-</Tabs>
-
-## Next steps
-
-**Recommended**
-
-- [Learn more about eBPF](use-cases-ebpf.mdx)
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx
deleted file mode 100644
index c7991eef45..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/troubleshoot-ebpf.mdx
+++ /dev/null
@@ -1,247 +0,0 @@
----
-description: How to troubleshoot when running in eBPF mode.
----
-
-# Troubleshoot eBPF mode
-
-This document gives some general troubleshooting guidance for the eBPF dataplane.
-
-To understand basic concepts, we recommend the following video by Tigera Engineers: [Opening the Black Box: Understanding and troubleshooting Calico's eBPF Data Plane](https://www.youtube.com/watch?v=Mh43sNBu208).
-
-## Troubleshoot access to services
-
-If pods or hosts within your cluster have trouble accessing services, check the following:
-
-- Either {{prodname}}'s eBPF mode or `kube-proxy` must be active on a host for services to function. If you
-  disabled `kube-proxy` when enabling eBPF mode, verify that eBPF mode is actually functioning. If {{prodname}}
-  detects that the kernel is not supported, it will fall back to standard dataplane mode (which does not support
-  services).
-
-  To verify that eBPF mode is correctly enabled, examine the log for a `{{noderunning}}` container; if
-  eBPF mode is not supported, it will emit an `ERROR` log that says
-
-  ```
-  BPF dataplane mode enabled but not supported by the kernel. Disabling BPF mode.
-  ```
-
-  If BPF mode is correctly enabled, you should see an `INFO` log that says
-
-  ```
-  BPF enabled, starting BPF endpoint manager and map manager.
-  ```
-
-- In eBPF mode, external client access to services (typically NodePorts) is implemented using VXLAN encapsulation.
-  If NodePorts time out when the backing pod is on another node, check that your underlying network fabric allows
-  VXLAN traffic between the nodes. VXLAN is a UDP protocol; by default it uses port 4789.
-- In DSR mode, {{prodname}} requires that the underlying network fabric allows one node to respond on behalf of
-  another.
-
-  - In AWS, to allow this, the Source/Dest check must be disabled on the node's NIC. However, note that DSR only
-    works within AWS; it is not compatible with external traffic through a load balancer. This is because the load
-    balancer is expecting the traffic to return from the same host.
-
-  - In GCP, the "Allow forwarding" option must be enabled. As with AWS, traffic through a load balancer does not
-    work correctly with DSR because the load balancer is not consulted on the return path from the backing node.
-
-## The `calico-bpf` tool
-
-Since BPF maps contain binary data, the {{prodname}} team wrote a tool to examine {{prodname}}'s BPF maps.
-The tool is embedded in the {{nodecontainer}} container image. To run the tool:
-
-- Find the name of the {{nodecontainer}} Pod on the host of interest using
-
-  ```bash
-  kubectl get pod -o wide -n calico-system
-  ```
-
-  for example, `calico-node-abcdef`
-
-- Run the tool as follows:
-
-  ```bash
-  kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf ...
-  ```
-
-  For example, to show the tool's help:
-
-  ```bash
-  kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf help
-
-  Usage:
-    calico-bpf [command]
-
-  Available Commands:
-    arp          Manipulates arp
-    connect-time Manipulates connect-time load balancing programs
-    conntrack    Manipulates connection tracking
-    counters     Show and reset counters
-    help         Help about any command
-    ipsets       Manipulates ipsets
-    nat          Manipulates network address translation (nat)
-    routes       Manipulates routes
-    version      Prints the version and exits
-
-  Flags:
-        --config string      config file (default is $HOME/.calico-bpf.yaml)
-    -h, --help               help for calico-bpf
-        --log-level string   Set log level (default "warn")
-    -t, --toggle             Help message for toggle
-  ```
-
-  (Since the tool is embedded in the main `calico-node` binary, the `--help` option is not available, but running
-  `calico-node -bpf help` does work.)
-
-  To dump the BPF conntrack table:
-
-  ```
-  kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf conntrack dump
-  ...
-  ```
-
-  Also, it is possible to fetch various counters, like packets dropped by a policy or different errors, from the BPF dataplane using the same tool.
-  For example, to dump the BPF counters of the `eth0` interface:
-
-  ```
-  kubectl exec -n calico-system calico-node-abcdef -- calico-node -bpf counters dump --iface=eth0
-  +----------+--------------------------------+---------+--------+-----+
-  | CATEGORY |              TYPE              | INGRESS | EGRESS | XDP |
-  +----------+--------------------------------+---------+--------+-----+
-  | Accepted | by another program             |       0 |      0 |   0 |
-  |          | by failsafe                    |       0 |      2 |  23 |
-  |          | by policy                      |       1 |      0 |   0 |
-  | Dropped  | by policy                      |       0 |      0 |   0 |
-  |          | failed decapsulation           |       0 |      0 |   0 |
-  |          | failed encapsulation           |       0 |      0 |   0 |
-  |          | incorrect checksum             |       0 |      0 |   0 |
-  |          | malformed IP packets           |       0 |      0 |   0 |
-  |          | packets with unknown route     |       0 |      0 |   0 |
-  |          | packets with unknown source    |       0 |      0 |   0 |
-  |          | packets with unsupported IP    |       0 |      0 |   0 |
-  |          | options                        |         |        |     |
-  |          | too short packets              |       0 |      0 |   0 |
-  | Total    | packets                        |      27 |    124 |  41 |
-  +----------+--------------------------------+---------+--------+-----+
-  dumped eth0 counters.
-  ```
-
-## Check if a program is dropping packets
-
-To check if an eBPF program is dropping packets, you can use either the `calico-bpf` or `tc` command-line tool. For example, if you
-are worried that the eBPF program attached to `eth0` is dropping packets, you can use `calico-bpf` to fetch BPF counters as described
-in the previous section and look for one of the `Dropped` counters, or you can run the following command:
-
-```
-tc -s qdisc show dev eth0
-```
-
-The output should look like the following; find the `clsact` qdisc, which is the attachment point for eBPF programs.
-The `-s` option to `tc` causes `tc` to display the count of dropped packets, which amounts to the count of packets
-dropped by the eBPF programs.
-
-```
-...
-qdisc clsact 0: dev eth0 root refcnt 2
-  sent 1340 bytes 10 pkt (dropped 10, overlimits 0 requeues 0)
-  backlog 0b 0p requeues 0
-...
-```
-
-## Debug high CPU usage
-
-If you notice `{{noderunning}}` using high CPU:
-
-- Check if `kube-proxy` is still running. If `kube-proxy` is still running, you must either disable `kube-proxy` or
-  ensure that the Felix configuration setting `bpfKubeProxyIptablesCleanupEnabled` is set to `false`. If the setting
-  is set to `true` (its default), then Felix will attempt to remove `kube-proxy`'s iptables rules. If `kube-proxy` is
-  still running, it will fight with Felix.
-- If your cluster is very large, or your workload involves significant service churn, you can increase the interval
-  at which Felix updates the services dataplane by increasing the `bpfKubeProxyMinSyncPeriod` setting. The default is
-  1 second. Increasing the value has the trade-off that service updates will happen more slowly.
-- {{prodname}} supports endpoint slices, similarly to `kube-proxy`. If your Kubernetes cluster supports endpoint
-  slices and they are enabled, then you can enable endpoint slice support in {{prodname}} with the
-  `bpfKubeProxyEndpointSlicesEnabled` configuration flag.
-
-## eBPF program debug logs
-
-{{prodname}}'s eBPF programs contain optional detailed debug logging. Although the logs can be very verbose (because
-the programs will log every packet), they can be invaluable to diagnose eBPF program issues. To enable the logs, set the
-`bpfLogLevel` Felix configuration setting to `Debug`.
-
-:::caution
-
-Enabling logs in this way has a significant impact on eBPF program performance.
-
-:::
-
-The logs are emitted to the kernel trace buffer, and they can be examined using the following command:
-
-```
-tc exec bpf debug
-```
-
-Logs have the following format:
-
-```
-     <...>-84582 [000] .Ns1  6851.690474: 0: ens192---E: Final result=ALLOW (-1). Program execution time: 7366ns
-```
-
-The parts of the log are explained below:
-
-- `<...>-84582` gives an indication about what program (or kernel process) was handling the
-  packet. For packets that are being sent, this is usually the name and PID of the program that is actually sending
-  the packet. For packets that are received, it is typically a kernel process, or an unrelated program that happens to
-  trigger the processing.
-- `6851.690474` is the log timestamp.
-
-- `ens192---E` is the {{prodname}} log tag. For programs attached to interfaces, the first part contains the
-  first few characters of the interface name. The suffix is either `-I` or `-E` indicating "Ingress" or "Egress".
-  "Ingress" and "Egress" have the same meaning as for policy:
-
-  - A workload ingress program is executed on the path from the host network namespace to the workload.
-  - A workload egress program is executed on the workload to host path.
-  - A host endpoint ingress program is executed on the path from external node to the host.
-  - A host endpoint egress program is executed on the path from host to external host.
-
-- `Final result=ALLOW (-1). Program execution time: 7366ns` is the message. In this case, logging the final result of
-  the program. Note that the timestamp is massively distorted by the time spent logging.
-
-## Poor performance
-
-A number of problems can reduce the performance of the eBPF dataplane.
-
-- Verify that you are using the best networking mode for your cluster. If possible, avoid using an overlay network;
-  a routed network with no overlay is considerably faster. If you must use one of {{prodname}}'s overlay modes,
-  use VXLAN, not IPIP. IPIP performs poorly in eBPF mode due to kernel limitations.
-- If you are not using an overlay, verify that the [Felix configuration parameters](../../reference/felix/configuration.mdx)
-  `ipInIpEnabled` and `vxlanEnabled` are set to `false`. Those parameters control whether Felix configures itself to
-  allow IPIP or VXLAN, even if you have no IP pools that use an overlay. The parameters also disable certain eBPF
-  mode optimisations for compatibility with IPIP and VXLAN.
-
-  To examine the configuration:
-
-  ```bash
-  kubectl get felixconfiguration -o yaml
-  ```
-
-  ```yaml noValidation
-  apiVersion: projectcalico.org/v3
-  items:
-    - apiVersion: projectcalico.org/v3
-      kind: FelixConfiguration
-      metadata:
-        creationTimestamp: "2020-10-05T13:41:20Z"
-        name: default
-        resourceVersion: "767873"
-        uid: 8df8d751-7449-4b19-a4f9-e33a3d6ccbc0
-      spec:
-        ...
-        ipipEnabled: false
-        ...
-        vxlanEnabled: false
-  kind: FelixConfigurationList
-  metadata:
-    resourceVersion: "803999"
-  ```
-
-- If you are running your cluster in a cloud such as AWS, then your cloud provider may limit the bandwidth between
-  nodes in your cluster. For example, most AWS nodes are limited to 5 Gbit/s per connection.
diff --git a/calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx b/calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx
deleted file mode 100644
index db8385260d..0000000000
--- a/calico_versioned_docs/version-3.25/operations/ebpf/use-cases-ebpf.mdx
+++ /dev/null
@@ -1,95 +0,0 @@
----
-description: Learn when to use eBPF, and when not to.
----
-
-# eBPF use cases
-
-## Big picture
-
-Learn when to use eBPF (and when not to).
-
-## What is eBPF?
-
-eBPF is a feature available in Linux kernels that allows you to run a virtual machine inside the kernel. This virtual machine allows you to safely load programs into the kernel, to customize its operation. Why is this important?
-
-In the past, making changes to the kernel was difficult: there were APIs you could call to get data, but you couldn’t influence what was inside the kernel or execute code. Instead, you had to submit a patch to the Linux community and wait for it to be approved. With eBPF, you can load a program into the kernel and instruct the kernel to execute your program if, for example, a certain packet is seen or another event occurs.
-
-With eBPF, the kernel and its behavior become highly customizable, instead of being fixed. This can be extremely beneficial, when used under the right circumstances.
-
-## {{prodname}} and eBPF
-
-{{prodname}} offers an eBPF data plane as an alternative to our standard Linux dataplane (which is iptables based). While the standard data plane focuses on compatibility by working together with kube-proxy and your own iptables rules, the eBPF data plane focuses on performance, latency, and improving user experience with features that aren’t possible with the standard data plane.
-
-But {{prodname}} doesn’t only support standard Linux and eBPF; it currently supports a total of three data planes, including Windows HNS, and has plans to add support for even more data planes in the near future. {{prodname}} enables you, the user, to decide what works best for what you want to do.
-
-If you enable eBPF within {{prodname}} but have existing iptables flows, we won’t touch them. Because maybe you want to use connect-time load balancing, but leave iptables as is. With {{prodname}}, it’s not an all-or-nothing deal—we allow you to easily load and unload our eBPF data plane to suit your needs, which means you can quickly try it out before making a decision. {{prodname}} offers you the ability to leverage eBPF as needed, as an additional control to build your Kubernetes cluster security.
-
-## Use cases
-
-There are several use cases for eBPF, including traffic control, creating network policy, and connect-time load balancing.
-
-### Traffic control
-
-Without eBPF, packets use the standard Linux networking path on their way to a final destination.
If a packet shows up at point A, and you know that the packet needs to go to point B, you can optimize the network path in the Linux kernel by sending it straight to point B. With eBPF, you can leverage additional context to make these changes in the kernel so that packets bypass complex routing and simply arrive at their final destination. - -This is especially relevant in a Kubernetes container environment, where you have numerous networks. (In addition to the host network stack, each container has its own mini network stack.) When traffic comes in, it is usually routed to a container stack and must travel a complex path as it makes its way there from the host stack. This routing can be bypassed using eBPF. - -### Creating network policy - -When creating network policy, there are two instances where eBPF can be used: - -- **eXpress Data Path (XDP)** – As a raw packet buffer enters the system, eBPF gives you an efficient way to examine that buffer and make quick decisions about what to do with it. - -- **Network policy** – eBPF allows you to efficiently examine a packet and apply network policy, both for pods and hosts. - -### Connect-time load balancing - -When load balancing service connections in Kubernetes, a port needs to talk to a service and therefore network address translation (NAT) must occur. A packet is sent to a virtual IP, and that virtual IP translates it to the destination IP of the pod backing the service; the pod then responds to the virtual IP and the return packet is translated back to the source. - -With eBPF, you can avoid this packet translation by using an eBPF program that you’ve loaded into the kernel and load balancing at the source of the connection. All NAT overhead from service connections is removed because destination network address translation (DNAT) does not need to take place on the packet processing path. - -## The price of performance - -So is eBPF more efficient than standard Linux iptables? The short answer: it depends. - -If you were to micro-benchmark how iptables works when applying network policies with a large number of IP addresses (i.e. ipsets), iptables in many cases is better than eBPF. But if you want to do something in the Linux kernel where you need to alter the packet flow in the kernel, eBPF would be the better choice. Standard Linux iptables is a complex system and certainly has its limitations, but at the same time it provides options to manipulate traffic; if you know how to program iptables rules, you can achieve a lot. eBPF allows you to load your own programs into the kernel to influence behavior that can be customized to your needs, so it is more flexible than iptables as it is not limited to one set of rules. - -Something else to consider is that, while eBPF allows you to run a program, add logic, redirect flows, and bypass processing—which is a definite win—it’s a virtual machine and as such must be translated to bytecode. By comparison, the Linux kernel’s iptables is already compiled to code. - -As you can see, comparing eBPF to iptables is not a straight apples-to-apples comparison. What we need to assess is performance, and the two key factors to look at here are latency (speed) and expense. If eBPF is very fast but takes up 80% of your resources, then it’s like a Lamborghini—an expensive, fast car. And if that works for you, great (maybe you really like expensive, fast cars). Just keep in mind that more CPU usage means more money spent with your cloud providers. 
So while a Lamborghini might be faster than a lot of other cars, it might not be the best use of money if you need to comply with speed limits on your daily commute. - -## When to use eBPF (and when not to) - -With eBPF, you get performance—but it comes at a cost. You need to find a balance between the two by figuring out the price of performance, and deciding if it’s acceptable to you from an eBPF perspective. - -Let’s look at some specific cases where it would make sense to use eBPF, and some where it would not. - -### When not to use eBPF - -### ✘ Packet-by-packet processing - -Using eBPF to perform CPU intensive or packet-by-packet processing, such as decryption and re-encryption for encrypted flows, would not be efficient because you would need to build a structure and do a lookup for every packet, which is expensive. - -### When to use eBPF - -### ✔ XDP - -eBPF provides an efficient way to examine raw packet buffers as they enter the system, allowing you to make quick decisions about what to do with them. - -### ✔ Connect-time load balancing - -With eBPF, you can load balance at the source using a program you’ve loaded into the kernel, instead of using a virtual IP. Since DNAT does not need to take place on the packet processing path, all NAT overhead from service connections is removed. - -### ✔ Building a service mesh control plane - -Service mesh relies on proxies like Envoy. A lot of thought has gone into designing this process over the years. The main reason for doing it this way is that, in many cases, it is not viable to do inline processing for application protocols like HTTP at the high speeds seen inside a cluster. Therefore, you should think of using eBPF to route traffic to a proxy like Envoy in an efficient way, rather than using it to replace the proxy itself. However, you do need to turn off connect-time load balancing (CTLB) so sidecars can see the service addresses. Given you are already taking a performance hit by the extra hop to the sidecar, not using CTLB performance optimization to avoid NAT overhead is likely not a big deal. - -## Summary - -Is eBPF a replacement for iptables? Not exactly. It’s hard to imagine everything working as efficiently with eBPF as it does with iptables. For now, the two co-exist and it’s up to the user to weigh the price-performance tradeoff and decide which feature to use when, given their specific needs. - -We believe the right solution is to leverage eBPF, along with existing mechanisms in the Linux kernel, to achieve your desired outcome. That’s why {{prodname}} offers support for multiple data planes, including standard Linux, Windows HNS, and Linux eBPF. Since we have established that both eBPF and iptables are useful, the only logical thing to do in our opinion is to support both. {{prodname}} gives you the choice so you can choose the best tool for the job. - -## Additional resources - -To learn more and see performance metrics from our test environment, see the blog, [Introducing the eBPF dataplane](https://www.projectcalico.org/introducing-the-calico-ebpf-dataplane/). diff --git a/calico_versioned_docs/version-3.25/operations/fips.mdx b/calico_versioned_docs/version-3.25/operations/fips.mdx deleted file mode 100644 index 63d79718f3..0000000000 --- a/calico_versioned_docs/version-3.25/operations/fips.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -description: Run Calico using FIPS validated cryptography. 
----
-
-# FIPS mode
-
-## Big picture
-
-Run {{prodname}} in [FIPS 140-2](https://csrc.nist.gov/publications/detail/fips/140/2/final) compliant mode.
-
-## Value
-
-When running in FIPS compliant mode, {{prodname}} uses FIPS-approved cryptographic algorithms and NIST-validated cryptographic modules.
-
-## Concepts
-
-The Federal Information Processing Standards (FIPS) are publicly announced standards developed by the National Institute of Standards and Technology for use in computer systems by government agencies and government contractors. {{prodname}} FIPS mode is enabled during installation by:
-
-- Switching the cryptographic modules for the golang-based applications to use the FIPS-140-2 validated [Tigera Cryptographic Module](https://csrc.nist.gov/projects/cryptographic-module-validation-program/certificate/4095)
-- Configuring TLS servers and other cryptographic functions to use FIPS 140-2 approved cryptographic algorithms
-
-## Before you begin
-
-**Required**
-
-- A Kubernetes distribution and cluster that run in FIPS mode
-- The hosts must run Linux x86_64 distributions
-- {{prodname}} contains programs that run directly on the host that use dynamic linking of C libraries. For this reason, it is a requirement for host systems to contain the following libraries:
-  - ld-linux-x86-64.so.2
-  - libpthread.so.0
-  - libc.so.6
-
-**Unsupported**
-
-- The following features are disabled and are not allowed to be used:
-  - Application Layer API
-  - BGP password
-  - WireGuard
-- Switching FIPS mode off and then on again is not supported because this can break hashes and other cryptographic settings.
-
-## How to
-
-To install {{prodname}} in FIPS mode, follow these steps.
-
-Follow [the installation steps](../getting-started/kubernetes/index.mdx) for your platform.
-
-- In the step for installing custom resources, edit `custom-resources.yaml` and enable FIPS mode in the installation spec.
-
-  ```yaml
-  apiVersion: operator.tigera.io/v1
-  kind: Installation
-  metadata:
-    name: default
-  spec:
-    fipsMode: Enabled
-  ```
-
-- For more information on configuration options available in this manifest, see [the installation reference](../reference/installation/api.mdx).
-
-After you apply the YAML, FIPS mode is fully operational.
diff --git a/calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx b/calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx
deleted file mode 100644
index 0321850ebd..0000000000
--- a/calico_versioned_docs/version-3.25/operations/image-options/alternate-registry.mdx
+++ /dev/null
@@ -1,86 +0,0 @@
----
-description: Configure Calico to pull images from a public or private registry.
----
-
-# Configure use of your image registry
-
-import MaintenanceImageOptionsAlternateRegistry from '../../_includes/components/MaintenanceImageOptionsAlternateRegistry';
-
-## Big picture
-
-Configure {{prodname}} to pull images from a registry (public or private).
-
-## Value
-
-In many deployments, installing {{prodname}} in clusters from third-party private repos is not an option.
{{prodname}} offers these public and private registry options, which can be used in any combination:
-
-- **Install from a registry** for use cases like air-gapped clusters, or clusters with bandwidth or security constraints
-- **Install from an image path in a registry** if you have pulled {{prodname}} images to a sub path in your registry
-- [Install images by registry digest](imageset.mdx)
-
-## Concepts
-
-A **container image registry** (often known as a **registry**) is a service where you can push, pull, and store container images. In Kubernetes, a registry is considered _private_ if it is not publicly available.
-
-A **private registry** requires an **image pull secret**. An **image pull secret** provides authentication for an image registry; this allows you to control access to certain images or give access to higher pull rate limits (like with DockerHub).
-
-An **image path** is a directory in a registry that contains images required to install {{prodname}}.
-
-## Before you begin
-
-**Required**
-
-- {{prodname}} is managed by the operator
-- Configure pull access to your registry
-- If you are using a private registry that requires user authentication, ensure that an image pull secret is configured for your registry in the tigera-operator namespace. Set the environment variable `REGISTRY_PULL_SECRET` to the secret name. For help, see the `imagePullSecrets` and `registry` fields in [Installation resource reference](../../reference/installation/api.mdx).
-
-## How to
-
-The following examples show the path format for a public or private registry, `$REGISTRY/`. If you are using an image path, substitute the format `$REGISTRY/$IMAGE_PATH/`.
-
-### Push {{prodname}} images to your registry
-
-To install images from your registry, you must first pull the images from Tigera's registry, retag them for your own registry, and then push the newly-tagged images to your own registry.
-
-<MaintenanceImageOptionsAlternateRegistry />
-
-### Run the operator using images from your registry
-
-Before applying `tigera-operator.yaml`, modify registry references to use your custom registry:
-
-**For OpenShift**
-
-Download all manifests first, then modify the following:
-
-```bash
-sed -ie "s?quay.io?$REGISTRY?g" manifests/02-tigera-operator.yaml
-```
-
-**For all other platforms**
-
-```bash
-sed -ie "s?quay.io?$REGISTRY?g" tigera-operator.yaml
-```
-
-Next, if you are implementing user authentication to access a private registry, add the image pull secret for your `registry` to the secret `tigera-pull-secret`.
-
-```bash
-sed -ie "/serviceAccountName: tigera-operator/a \ imagePullSecrets:\n\ - name: $REGISTRY_PULL_SECRET" tigera-operator.yaml
-```
-
-### Configure the operator to use images
-
-Set the `spec.registry` field of your Installation resource to the name of your custom registry. For example:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: Installation
-metadata:
-  name: default
-spec:
-  variant: Calico
-  imagePullSecrets:
-    - name: tigera-pull-secret
-  // highlight-next-line
-  registry: myregistry.com
-```
diff --git a/calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx b/calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx
deleted file mode 100644
index df60821f52..0000000000
--- a/calico_versioned_docs/version-3.25/operations/image-options/imageset.mdx
+++ /dev/null
@@ -1,244 +0,0 @@
----
-description: Specify the digests for operator to use to deploy images.
----
-
-# Install images by registry digest
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Deploy images by container registry digest for operator installations.
-
-## Value
-
-Some deployments have strict security requirements that require deploying images by immutable digest instead of tags.
-Once released, official {{prodname}} images and tags will not be modified. However, using an immutable digest allows specific images to be reviewed
-and verified by security teams.
-
-## Concepts
-
-### Container registry
-
-A container registry provides access to container images referenced by tags or digest.
-
-### Image tag
-
-Versioned container images are typically referenced by a tag which is appended to an image reference. Example: `<registry>/<image>:<tag>`. Container image tags are typically not expected to be changed or updated, but this is not required or enforced by most image registries, meaning it is possible to push new code to the same image tag.
-
-### Image digest
-
-Container images, when added to a container registry, have a unique hash created that can be used to pull a specific version of an image that cannot be changed or updated.
-
-## Before you begin
-
-**Required**
-
-- {{prodname}} managed by the operator
-- Docker client is configured to pull images from the container registries where images are stored
-- Kubernetes permissions to apply an ImageSet manifest to your cluster
-
-## How to
-
-1. [Update the operator deployment with a digest](#update-the-operator-deployment-with-a-digest)
-2. [Create an ImageSet](#create-an-imageset)
-3. [Verify the correct ImageSet is being used](#verify-the-correct-imageset-is-being-used)
-
-**Other tasks**
-
-- [Create new ImageSet when upgrading or downgrading](#create-new-imageset-when-upgrading-or-downgrading)
-
-**Troubleshooting**
-
-- [Why does the Installation resource status not include my ImageSet?](#why-does-the-installation-resource-status-not-include-my-imageset)
-- [How can I tell if there is a problem with my ImageSet?](#how-can-i-tell-if-there-is-a-problem-with-my-imageset)
-
-### Update the operator deployment with a digest
-
-Before applying `tigera-operator.yaml`, modify the operator deployment to use the operator image digest.
-
-Use commands like the following to get the image digest (adjust the image in the commands if you are using a different operator image):
-
-```bash
-docker pull {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}}
-docker inspect {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} -f '{{range .RepoDigests}}{{printf "%s\n" .}}{{end}}'
-```
-
-If multiple digests are returned, select the one matching the registry you are using.
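-
-Each returned entry has the form `<registry>/<image>@sha256:<digest>`. As a convenience (an editorial sketch, not part of the original steps, assuming the default `quay.io` registry), you can filter for the right registry and strip everything before the digest in one pipeline:
-
-```bash
-# Keep the quay.io entry and print only the sha256:... portion.
-docker inspect {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} \
-  -f '{{range .RepoDigests}}{{printf "%s\n" .}}{{end}}' | grep '^quay.io/' | sed -e 's/^.*@//'
-```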
-
-Update the tigera-operator deployment:
-
-```bash
-sed -ie "s|\(image: .*/operator\):.*|\1@<operator digest>|" tigera-operator.yaml
-```
-
-### Create an ImageSet
-
-Create an [ImageSet](../../reference/installation/api.mdx#operator.tigera.io/v1.ImageSet) manifest file named `imageset.yaml` like the following:
-
-```yaml
-apiVersion: operator.tigera.io/v1
-kind: ImageSet
-metadata:
-  name: calico-{{releaseTitle}}
-spec:
-  images:
-    - image: 'calico/apiserver'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'calico/cni'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'calico/kube-controllers'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'calico/node'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'calico/typha'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'calico/pod2daemon-flexvol'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'calico/windows-upgrade'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'tigera/operator'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-    - image: 'tigera/key-cert-provisioner'
-      digest: 'sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef'
-```
-
-You can create an ImageSet manifest manually or by script.
-
-<Tabs>
-<TabItem value="manual" label="Manually">
-
-1. Copy the above example into a file called `imageset.yaml` and edit that file in the steps below.
-1. Set the name for your ImageSet to `calico-<version>` (Example: `calico-{{releaseTitle}}`).
-   The version can be obtained by running:
-   ```
-   docker run {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} --version
-   ```
-1. Add the correct digest for each image. If you are using a private registry, ensure you pull the image from the private registry and use the digest associated with the private registry.
-
-   1. If using the default images, get a list of them by running:
-
-      ```
-      docker run {{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}} --print-images=list
-      ```
-
-      :::note
-
-      If you are not using the default image registries or paths, you must create your own list of images (and the above command will not apply).
-
-      :::
-      :::note
-
-      The list will contain images for an Enterprise deployment but they do not need to be added to the ImageSet.
-
-      :::
-
-   1. Get the needed digests by using the images returned from the above step in the following command:
-      ```
-      docker pull <repo/image:tag> && docker inspect <repo/image:tag> -f '{{range .RepoDigests}}{{printf "%s\n" .}}{{end}}'
-      ```
-   1. Use the digest from the image that matches the repo/image you will use.
-      If you are using a private registry or have specified an [imagePath](../../reference/installation/api.mdx#operator.tigera.io/v1.Installation)
-      you will still use the "default" `<owner>/<image>` in the `image` field, for example if your node image is coming from
-      `example.com/registry/imagepath/node` you will still use `calico/node` in the image field of the ImageSet.
-      :::note
-
-      For image `quay.io/tigera/operator@sha256:d111db2f94546415a30eff868cb946d47e183faa804bd2e9a758fd9a8a4eaff1`, copy everything after `@` and add it as the digest for the `tigera/operator` image.
-
-      :::
-
-</TabItem>
-<TabItem value="script" label="By script">
-
-Copy the following script into a file, make it executable, and run the script.
-The script creates an `imageset.yaml` file in the directory where it was run.
-:::note
-
-This script will only work if using the default registries and image paths.
-
-:::
-
-```
-#!/bin/bash -e
-
-images=(calico/apiserver calico/cni calico/kube-controllers calico/node calico/typha calico/pod2daemon-flexvol calico/windows-upgrade tigera/key-cert-provisioner tigera/operator)
-
-OPERATOR_IMAGE={{tigeraOperator.registry}}/{{tigeraOperator.image}}:{{tigeraOperator.version}}
-echo "Pulling $OPERATOR_IMAGE"
-echo
-docker pull $OPERATOR_IMAGE -q >/dev/null
-versions=$(docker run $OPERATOR_IMAGE --version)
-ver=$(echo -e "$versions" | grep 'Calico:')
-
-imagelist=($(docker run $OPERATOR_IMAGE --print-images=list))
-
-# Write the ImageSet header; the name is "calico-" plus the Calico version
-# reported by the operator.
-cat > ./imageset.yaml <<EOF
-apiVersion: operator.tigera.io/v1
-kind: ImageSet
-metadata:
-  name: calico-$(echo "$ver" | sed -e 's/^.*: *//')
-spec:
-  images:
-EOF
-
-# For each image the operator reports, look up its digest and append an entry.
-for image in "${imagelist[@]}"; do
-  for wanted in "${images[@]}"; do
-    if [[ "$image" == *"$wanted"* ]]; then
-      digest=$(docker pull -q "$image" >/dev/null && docker inspect --format='{{index .RepoDigests 0}}' "$image" | cut -d '@' -f 2)
-      echo "  - image: \"$wanted\"" >> ./imageset.yaml
-      echo "    digest: \"$digest\"" >> ./imageset.yaml
-    fi
-  done
-done
-```
-
-
-
-
-Apply the created `imageset.yaml` to your cluster.
-
-### Verify the correct ImageSet is being used
-
-1. Check tigerastatus for components that are Degraded with `kubectl get tigerastatus`.
-   - If any components show Degraded, [investigate further](#how-can-i-tell-if-there-is-a-problem-with-my-imageset).
-2. When tigerastatus for all components shows Available True, the ImageSet has been applied.
-   ```
-   NAME     AVAILABLE   PROGRESSING   DEGRADED   SINCE
-   calico   True        False         False      54s
-   ```
-3. Verify that the correct ImageSet is being used. In the Installation status, check that the `imageSet` field is set to the ImageSet you created.
-   Check the field by running the following command:
-   ```
-   kubectl get installation default -o yaml | grep imageSet
-   ```
-   You should see output similar to:
-   ```
-   imageSet: calico-{{releaseTitle}}
-   ```
-
-## Other tasks
-
-### Create new ImageSet when upgrading or downgrading
-
-Before upgrading to a new release or downgrading, you must create a new [ImageSet](../../reference/installation/api.mdx#operator.tigera.io/v1.ImageSet)
-with updated image references and names for the new release. This must be done prior
-to upgrading the cluster so that when the new manifests are applied, the appropriate ImageSet is available.
-
-## Troubleshooting
-
-### Why does the Installation resource status not include my ImageSet?
-
-The [status.imageset](../../reference/installation/api.mdx#operator.tigera.io/v1.InstallationStatus) field of
-the Installation resource will not be updated until the `calico` component has fully been deployed. `calico` is
-fully deployed when `kubectl get tigerastatus calico` reports Available True with Progressing and Degraded as False.
-
-### How can I tell if there is a problem with my ImageSet?
-
-If you suspect an issue with your ImageSet, check tigerastatus with `kubectl get tigerastatus`. If any components are
-degraded, you can get additional information with `kubectl get tigerastatus <component> -o yaml`. If the digest
-provided for an image is incorrect or cannot be pulled, tigerastatus will not directly report that information,
-but you should see that there is an issue rolling out a Deployment, DaemonSet, or Job. If you suspect
-an issue with a resource rollout due to an issue with an image, you will need to `get` or `describe` a specific pod
-to see details about the problem.
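-
-For example, a mistyped digest typically surfaces as an image pull failure on the pods of the affected Deployment or DaemonSet. A quick way to spot it (the pod name below is illustrative):
-
-```bash
-kubectl get pods -n calico-system
-kubectl describe pod -n calico-system calico-node-xxxxx | grep -A 10 'Events'
-```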
diff --git a/calico_versioned_docs/version-3.25/operations/image-options/index.mdx b/calico_versioned_docs/version-3.25/operations/image-options/index.mdx
deleted file mode 100644
index b65e8c2905..0000000000
--- a/calico_versioned_docs/version-3.25/operations/image-options/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Install Calico from a public or private registry, or by digest.
-hide_table_of_contents: true
----
-
-# Install using an alternate registry
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/index.mdx b/calico_versioned_docs/version-3.25/operations/index.mdx
deleted file mode 100644
index a068ff8c55..0000000000
--- a/calico_versioned_docs/version-3.25/operations/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Post-installation tasks for managing Calico including upgrading and troubleshooting.
-hide_table_of_contents: true
----
-
-# Operations
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/install-apiserver.mdx b/calico_versioned_docs/version-3.25/operations/install-apiserver.mdx
deleted file mode 100644
index b035c7598d..0000000000
--- a/calico_versioned_docs/version-3.25/operations/install-apiserver.mdx
+++ /dev/null
@@ -1,194 +0,0 @@
----
-description: Install the Calico API server on an existing Calico cluster
----
-
-# Enable kubectl to manage Calico APIs
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-[ **Feature status**: GA in Calico v3.20+ ]
-
-Install the Calico API server on an existing cluster to enable management of Calico APIs using kubectl.
-
-## Value
-
-The API server provides a REST API for Calico, and allows management of `projectcalico.org/v3` APIs using kubectl without the need for calicoctl.
-
-:::note
-
-Starting in Calico v3.20.0, new operator-based installations of Calico include the API server component by default, so the instructions
-in this document are not required.
-
-:::
-
-## Before you begin
-
-- Make sure you have a cluster with Calico installed using the Kubernetes API data store. If not, you can [migrate from etcd](datastore-migration.mdx).
-
-- Upgrade to Calico v3.20+ using the appropriate [upgrade instructions](upgrading/index.mdx).
-
-- For non-operator installations, you will need a machine with `openssl` installed.
-
-## Concepts
-
-### calicoctl vs kubectl
-
-In previous releases, calicoctl has been required to manage Calico API resources in the `projectcalico.org/v3` API group. The calicoctl CLI tool provides important validation and defaulting on these APIs. The Calico API server performs
-that defaulting and validation server-side, exposing the same API semantics without a dependency on calicoctl.
-
-calicoctl is still required for the following subcommands:
-
-- [calicoctl node](../reference/calicoctl/node/index.mdx)
-- [calicoctl ipam](../reference/calicoctl/ipam/index.mdx)
-- [calicoctl convert](../reference/calicoctl/convert.mdx)
-- [calicoctl version](../reference/calicoctl/version.mdx)
-
-## How to
-
-### Install the API server
-
-Select the tab below that matches your installation method.
-
-
-
-
-1. Create an instance of an `operator.tigera.io/APIServer` with the following contents.
-
-   ```yaml
-   apiVersion: operator.tigera.io/v1
-   kind: APIServer
-   metadata:
-     name: default
-   spec: {}
-   ```
-
-1. Confirm it appears as `Available` with the following command.
-
-   ```
-   kubectl get tigerastatus apiserver
-   ```
-
-   You should see the following output:
-
-   ```
-   NAME        AVAILABLE   PROGRESSING   DEGRADED   SINCE
-   apiserver   True        False         False      1m10s
-   ```
-
-
-
-
-1. Create the following manifest, which will install the API server as a deployment in the `calico-apiserver` namespace.
-
-   ```
-   kubectl create -f {{manifestsUrl}}/manifests/apiserver.yaml
-   ```
-
-   You will notice that the API server remains in a `ContainerCreating` state, as it is waiting for credentials to be provided for authenticating the main Kubernetes API server.
-
-1. Generate a private key and CA bundle using the following openssl command. This certificate will be used by the main API server to authenticate with the Calico API server.
-
-   :::note
-
-   The `-addext` argument in the following command requires OpenSSL 1.1.1 or above. You can check your version of openssl using `openssl version`.
-
-   :::
-
-   ```
-   openssl req -x509 -nodes -newkey rsa:4096 -keyout apiserver.key -out apiserver.crt -days 365 -subj "/" -addext "subjectAltName = DNS:calico-api.calico-apiserver.svc"
-   ```
-
-1. Provide the key and certificate to the Calico API server as a Kubernetes secret.
-
-   ```
-   kubectl create secret -n calico-apiserver generic calico-apiserver-certs --from-file=apiserver.key --from-file=apiserver.crt
-   ```
-
-1. Configure the main API server with the CA bundle.
-
-   ```
-   kubectl patch apiservice v3.projectcalico.org -p \
-       "{\"spec\": {\"caBundle\": \"$(kubectl get secret -n calico-apiserver calico-apiserver-certs -o go-template='{{ index .data "apiserver.crt" }}')\"}}"
-   ```
-
-
-
-
-After following the above steps, you should see the API server pod become ready, and Calico API resources become available. You can check whether the APIs are available with the following command:
-
-```
-kubectl api-resources | grep '\sprojectcalico.org'
-```
-
-You should see the following output:
-
-```
-bgpconfigurations               bgpconfig,bgpconfigs                            projectcalico.org   false   BGPConfiguration
-bgppeers                                                                        projectcalico.org   false   BGPPeer
-clusterinformations             clusterinfo                                     projectcalico.org   false   ClusterInformation
-felixconfigurations             felixconfig,felixconfigs                        projectcalico.org   false   FelixConfiguration
-globalnetworkpolicies           gnp,cgnp,calicoglobalnetworkpolicies            projectcalico.org   false   GlobalNetworkPolicy
-globalnetworksets                                                               projectcalico.org   false   GlobalNetworkSet
-hostendpoints                   hep,heps                                        projectcalico.org   false   HostEndpoint
-ippools                                                                         projectcalico.org   false   IPPool
-kubecontrollersconfigurations                                                   projectcalico.org   false   KubeControllersConfiguration
-networkpolicies                 cnp,caliconetworkpolicy,caliconetworkpolicies   projectcalico.org   true    NetworkPolicy
-networksets                     netsets                                         projectcalico.org   true    NetworkSet
-profiles                                                                        projectcalico.org   false   Profile
-```
-
-:::note
-
-kubectl may continue to prefer the crd.projectcalico.org API group due to the way it caches APIs locally. You can force kubectl to update
-by removing its cache directory for your cluster. By default, the cache is located in `$HOME/.kube/cache`.
-
-:::
-
-### Use kubectl for projectcalico.org APIs
-
-Once the API server has been installed, you can use kubectl to interact with the Calico APIs. For example, you can view and edit IP pools.
-
-```
-kubectl get ippools
-```
-
-You should see output that looks like this:
-
-```
-NAME                  CREATED AT
-default-ipv4-ippool   2021-03-19T16:47:12Z
-```
-
-### Uninstall the Calico API server
-
-To uninstall the API server, use the following instructions depending on your install method.
-
-
-
-
-```
-kubectl delete apiserver default
-```
-
-
-
-
-```
-kubectl delete -f {{manifestsUrl}}/manifests/apiserver.yaml
-```
-
-
-
-
-Once removed, you will need to use calicoctl to manage projectcalico.org/v3 APIs.
-
-## Next steps
-
-**Recommended tutorials**
-
-- [Secure a simple application using the Kubernetes NetworkPolicy API](../network-policy/get-started/kubernetes-policy/kubernetes-policy-basic.mdx)
-- [Control ingress and egress traffic using the Kubernetes NetworkPolicy API](../network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced.mdx)
-- [Run a tutorial that shows blocked and allowed connections in real time](../network-policy/get-started/kubernetes-policy/kubernetes-demo.mdx)
diff --git a/calico_versioned_docs/version-3.25/operations/monitor/index.mdx b/calico_versioned_docs/version-3.25/operations/monitor/index.mdx
deleted file mode 100644
index 486c353c54..0000000000
--- a/calico_versioned_docs/version-3.25/operations/monitor/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Tools for scraping useful metrics
-hide_table_of_contents: true
----
-
-# Monitor
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx b/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx
deleted file mode 100644
index c11b109b4d..0000000000
--- a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-metrics.mdx
+++ /dev/null
@@ -1,661 +0,0 @@
----
-description: Use open source Prometheus for monitoring and alerting on Calico components.
----
-
-# Monitor Calico component metrics
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-## Big picture
-
-Use Prometheus configured for {{prodname}} components to get valuable metrics about the health of {{prodname}}.
-
-## Value
-
-Using the open-source Prometheus monitoring and alerting toolkit, you can view time-series metrics from {{prodname}} components in the Prometheus or Grafana interfaces.
-
-## Concepts
-
-### About Prometheus
-
-The Prometheus monitoring tool scrapes metrics from instrumented jobs and displays time series data in a visualizer (such as Grafana). For {{prodname}}, the “jobs” that Prometheus can harvest metrics from are the Felix, Typha, and kube-controllers components.
-
-### About {{prodname}} Felix, Typha, and kube-controllers components
-
-**Felix** is a daemon that runs on every machine and implements network policy; it is the brains of {{prodname}}. **Typha** is an optional set of pods that extends Felix by scaling the flow of data between the {{prodname}} nodes and the datastore. The **kube-controllers** pod runs a set of controllers which are responsible for a variety of control plane functions, such as resource garbage collection and synchronization with the Kubernetes API.
-
-You can configure Felix, Typha, and/or kube-controllers to provide metrics to Prometheus.
-
-## Before you begin...
-
-In this tutorial we assume that you have completed all other introductory tutorials and possess a running Kubernetes cluster with {{prodname}}. You can either use `kubectl` or `calicoctl` to perform the following steps.
-Depending on which tool you would like to use, make sure you have the necessary prerequisites as shown below.
-
-
-
-
-If you wish to modify {{prodname}} configurations with the `kubectl` binary, you need to make sure you have the {{prodname}} API server in your cluster. The API server allows you to manage resources within the `projectcalico.org/v3` API group.
-
-:::note
-
-Operator based installs include the API server by default.
-
-:::
-
-For more information about the API server, see [this guide](../install-apiserver.mdx).
-
-
-
-
-You can run `calicoctl` on any host with network access to the Calico datastore, as either a binary or a container, to manage Calico APIs in the `projectcalico.org/v3` API group.
-
-For more information about calicoctl, see [this guide](../calicoctl/install.mdx).
-
-
-
-
-## How to
-
-This tutorial will go through the necessary steps to implement basic monitoring of {{prodname}} with Prometheus.
-
-1. Configure {{prodname}} to enable the metrics reporting.
-2. Create the namespace and service account that Prometheus will need.
-3. Deploy and configure Prometheus.
-4. View the metrics in the Prometheus dashboard and create a simple graph.
-
-### 1. Configure {{prodname}} to enable metrics reporting
-
-#### **Felix configuration**
-
-Felix Prometheus metrics are **disabled** by default.
-:::note
-
-A comprehensive list of configuration values can be [found at this link](../../reference/felix/configuration.mdx).
-
-:::
-
-Use the following command to enable Felix metrics.
-
-
-
-
-```bash
-kubectl patch felixconfiguration default --type merge --patch '{"spec":{"prometheusMetricsEnabled": true}}'
-```
-
-You should see output like the following:
-
-```
-felixconfiguration.projectcalico.org/default patched
-```
-
-
-
-
-```bash
-calicoctl patch felixconfiguration default --patch '{"spec":{"prometheusMetricsEnabled": true}}'
-```
-
-You should see output like the following:
-
-```
-Successfully patched 1 'FelixConfiguration' resource
-```
-
-
-
-
-#### **Creating a service to expose Felix metrics**
-
-Prometheus uses Kubernetes services to dynamically discover endpoints. Here you will create a service named `felix-metrics-svc` which Prometheus will use to discover all the Felix metrics endpoints.
-
-:::note
-
-Felix by default uses port 9091 TCP to publish its metrics.
-
-:::
-
-
-
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: felix-metrics-svc
-  namespace: calico-system
-spec:
-  clusterIP: None
-  selector:
-    k8s-app: calico-node
-  ports:
-    - port: 9091
-      targetPort: 9091
-EOF
-```
-
-
-
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: felix-metrics-svc
-  namespace: kube-system
-spec:
-  clusterIP: None
-  selector:
-    k8s-app: calico-node
-  ports:
-    - port: 9091
-      targetPort: 9091
-EOF
-```
-
-
-
-
-#### **Typha configuration**
-
-
-
-
-An Operator installation of {{prodname}} automatically deploys one or more Typha instances depending on the scale of your cluster. By default, metrics for these instances are disabled.
-
-Use the following command to instruct `tigera-operator` to enable Typha metrics.
-
-```bash
-kubectl patch installation default --type=merge -p '{"spec": {"typhaMetricsPort":9093}}'
-```
-
-You should see a result similar to:
-
-```bash
-installation.operator.tigera.io/default patched
-```
-
-
-
-
-:::note
-
-Typha is optional; if you don't have Typha in your cluster, you can skip the [Typha configuration](#typha-configuration) section.
-
-:::
-
-If you are uncertain whether you have Typha in your cluster, run the following command:
-
-```bash
-kubectl get pods -A | grep typha
-```
-
-If your result is similar to what is shown below, you are using Typha in your cluster.
-
-:::note
-
-The name suffix of the pods shown below was dynamically generated. Your Typha instance might have a different suffix.
-
-:::
-
-```
-kube-system   calico-typha-56fccfcdc4-z27xj                         1/1   Running   0   28h
-kube-system   calico-typha-horizontal-autoscaler-74f77cd87c-6hx27   1/1   Running   0   28h
-```
-
-You can enable Typha metrics to be consumed by Prometheus in either of [two ways](../../reference/typha/configuration.mdx).
-
-
-
-
-#### **Creating a service to expose Typha metrics**
-
-:::note
-
-Typha uses **port 9091** TCP by default to publish its metrics. However, if {{prodname}} is installed using the [Amazon yaml file](https://github.com/aws/amazon-vpc-cni-k8s/blob/b001dc6a8fff52926ed9a93ee6c4104f02d365ab/config/v1.5/calico.yaml#L535-L536), this port will be 9093, as it is set manually via the **TYPHA_PROMETHEUSMETRICSPORT** environment variable.
-
-:::
-
-
-
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: typha-metrics-svc
-  namespace: calico-system
-spec:
-  clusterIP: None
-  selector:
-    k8s-app: calico-typha
-  ports:
-    - port: 9093
-      targetPort: 9093
-EOF
-```
-
-
-
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: typha-metrics-svc
-  namespace: kube-system
-spec:
-  clusterIP: None
-  selector:
-    k8s-app: calico-typha
-  ports:
-    - port: 9093
-      targetPort: 9093
-EOF
-```
-
-
-
-
-#### **kube-controllers configuration**
-
-Prometheus metrics are **enabled** by default on [TCP port 9094](../../reference/resources/kubecontrollersconfig.mdx) for `calico-kube-controllers`.
-
-
-
-
-The operator automatically creates a service that exposes these metrics.
-
-You can use the following command to verify it.
-
-```bash
-kubectl get svc -n calico-system
-```
-
-You should see a result similar to:
-
-```bash
-calico-kube-controllers-metrics   ClusterIP   10.43.77.57   <none>   9094/TCP   39d
-```
-
-
-
-
-#### **Creating a service to expose kube-controllers metrics**
-
-Create a service to expose `calico-kube-controllers` metrics to Prometheus.
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: v1
-kind: Service
-metadata:
-  name: kube-controllers-metrics-svc
-  namespace: kube-system
-spec:
-  clusterIP: None
-  selector:
-    k8s-app: calico-kube-controllers
-  ports:
-    - port: 9094
-      targetPort: 9094
-EOF
-```
-
-
-
-
-**Optionally**, you can use the following command to modify the port by changing the `KubeControllersConfiguration` resource if desired.
-
-:::note
-
-Setting this value to zero will disable metrics in the kube-controllers pod.
-
-:::
-
-
-
-
-```bash
-kubectl patch kubecontrollersconfiguration default --type=merge --patch '{"spec":{"prometheusMetricsPort": 9095}}'
-```
-
-
-
-
-```bash
-calicoctl patch kubecontrollersconfiguration default --patch '{"spec":{"prometheusMetricsPort": 9095}}'
-```
-
-
-
-
-### 2. Cluster preparation
-
-#### **Namespace creation**
-
-A `Namespace` isolates resources in your cluster. Here you will create a namespace called `calico-monitoring` to hold your monitoring resources.
-:::note
-
-A guide to Kubernetes namespaces can be [found at this link](https://kubernetes.io/docs/tasks/administer-cluster/namespaces/).
-
-:::
-
-```bash
-kubectl create -f -<
-
-
-```bash
-kubectl apply -f - <
-
-
-```bash
-kubectl apply -f - <
-
-
-#### **Create Prometheus pod**
-
-Now that you have a `serviceaccount` with permissions to gather metrics and have a valid config file for your Prometheus, it's time to create the Prometheus pod.
-
-```bash
-kubectl apply -f - <
-
-
-```bash
-kubectl delete service felix-metrics-svc -n calico-system
-kubectl delete service typha-metrics-svc -n calico-system
-```
-
-
-
-
-```
-kubectl delete service felix-metrics-svc -n kube-system
-kubectl delete service typha-metrics-svc -n kube-system
-kubectl delete service kube-controllers-metrics-svc -n kube-system
-```
-
-
-
-
-Return {{prodname}} configurations to their default state.
-
-
-
-
-```bash
-kubectl patch felixConfiguration default --type merge --patch '{"spec":{"prometheusMetricsEnabled": false}}'
-kubectl patch installation default --type=json -p '[{"op": "remove", "path":"/spec/typhaMetricsPort"}]'
-```
-
-
-
-
-```bash
-calicoctl patch felixConfiguration default --patch '{"spec":{"prometheusMetricsEnabled": false}}'
-```
-
-
-
-
-Finally, remove the namespace and RBAC permissions.
-
-```bash
-kubectl delete namespace calico-monitoring
-kubectl delete ClusterRole calico-prometheus-user
-kubectl delete clusterrolebinding calico-prometheus-user
-```
-
-## Best practices
-
-If you enable {{prodname}} metrics to Prometheus, a best practice is to use network policy to limit access to the {{prodname}} metrics endpoints. For details, see [Secure {{prodname}} Prometheus endpoints](../../network-policy/comms/secure-metrics.mdx).
-
-If you are not using Prometheus metrics, we recommend disabling the Prometheus ports entirely for more security.
-
-## Next steps
-
-[Visualizing metrics via Grafana.](monitor-component-visual.mdx)
diff --git a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx b/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx
deleted file mode 100644
index 4484417523..0000000000
--- a/calico_versioned_docs/version-3.25/operations/monitor/monitor-component-visual.mdx
+++ /dev/null
@@ -1,199 +0,0 @@
----
-description: Use open source Grafana for visualizing Calico components.
----
-
-# Visualizing metrics via Grafana
-
-## Big picture
-
-Use a Grafana dashboard to view {{prodname}} component metrics.
-
-## Value
-
-Grafana lets you visualize metrics through graphs that help you quickly identify unusual activity. The following image shows some of the graphs and metrics that are available for you to leverage to achieve this goal.
-
-![](/img/calico/grafana-dashboard.png)
-
-## Concepts
-
-### About Grafana
-
-Grafana is an open source visualization and analytics tool that allows you to query, visualize, alert on, and explore metrics from a variety of data sources, including Calico component metrics stored in Prometheus.
-
-### About Prometheus
-
-Prometheus is an open source monitoring tool that scrapes metrics from instrumented components and stores them as time series data, which can then be visualized using tools such as Grafana.
-
-## Before you begin...
-
-In this tutorial we assume you have
-
-- a running Kubernetes cluster with {{prodname}}, calicoctl, and kubectl installed
-- completed all steps in the [monitor component metrics](monitor-component-metrics.mdx) guide to set up Prometheus to gather {{prodname}} component metrics.
-
-## How to
-
-This tutorial will go through the necessary steps to create {{prodname}} metrics dashboards with Grafana.
-
-### Preparing Prometheus
-
-Here you will create a service to make your Prometheus deployment visible to Grafana.
-
-```bash
-kubectl apply -f - < 27h v1.18.0
-ip-10-0-0-12 Ready <none> 27h v1.18.0
-
-```
-
-### Verify calico-node pods are running on every node, and are in a healthy state
-
-```bash
-kubectl get pods -n calico-system -o wide
-```
-
-```
-NAME                READY   STATUS    RESTARTS   AGE   IP          NODE
-calico-node-77zgj   1/1     Running   0          27h   10.0.0.10   ip-10-0-0-10
-calico-node-nz8k2   1/1     Running   0          27h   10.0.0.11   ip-10-0-0-11
-calico-node-7trv7   1/1     Running   0          27h   10.0.0.12   ip-10-0-0-12
-```
-
-### Exec into pod for further troubleshooting
-
-```bash
-kubectl run multitool --image=praqma/network-multitool
-
-kubectl exec -it multitool -- bash
-```
-
-```
-bash-5.0# ping 8.8.8.8
-PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
-64 bytes from 8.8.8.8: icmp_seq=1 ttl=97 time=6.61 ms
-64 bytes from 8.8.8.8: icmp_seq=2 ttl=97 time=6.64 ms
-```
-
-### Collect {{prodname}} diagnostic logs
-
-```bash
-sudo calicoctl node diags
-```
-
-```
-Collecting diagnostics
-Using temp dir: /tmp/calico194224816
-Dumping netstat
-Dumping routes (IPv4)
-Dumping routes (IPv6)
-Dumping interface info (IPv4)
-Dumping interface info (IPv6)
-Dumping iptables (IPv4)
-Dumping iptables (IPv6)
-
-Diags saved to /tmp/calico194224816/diags-20201127_010117.tar.gz
-```
-
-## Kubernetes
-
-### Verify all pods are running
-
-```bash
-kubectl get pods -A
-```
-
-```
-kube-system   coredns-66bff467f8-dxbtl      1/1   Running   0   27h
-kube-system   coredns-66bff467f8-n95vq      1/1   Running   0   27h
-kube-system   etcd-ip-10-0-0-10             1/1   Running   0   27h
-kube-system   kube-apiserver-ip-10-0-0-10   1/1   Running   0   27h
-```
-
-### Verify Kubernetes API server is running
-
-```bash
-kubectl cluster-info
-```
-
-```
-Kubernetes master is running at https://10.0.0.10:6443
-KubeDNS is running at https://10.0.0.10:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
-```
-
-### Verify Kubernetes kube-dns is working
-
-```bash
-kubectl get svc
-```
-
-```
-NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
-kubernetes   ClusterIP   10.49.0.1    <none>        443/TCP   2d2h
-```
-
-```bash
-kubectl exec -it multitool -- bash
-```
-
-```
-bash-5.0# curl -I -k https://kubernetes
-HTTP/2 403
-cache-control: no-cache, private
-content-type: application/json
-x-content-type-options: nosniff
-content-length: 234
-```
-
-```bash
-bash-5.0# nslookup google.com
-```
-
-```
-Server:    10.49.0.10
-Address:   10.49.0.10#53
-Non-authoritative answer:
-Name:   google.com
-Address: 172.217.14.238
-Name:   google.com
-Address: 2607:f8b0:400a:804::200e
-```
-
-### Verify that kubelet is running on the node with the correct flags
-
-```bash
-systemctl status kubelet
-```
-
-If there is a problem, check the journal:
-
-```bash
-journalctl -u kubelet | head
-```
-
-### Check the status of other system pods
-
-Look especially at CoreDNS; if the pods are not getting an IP address, something is wrong with the CNI.
-
-```bash
-kubectl get pod -n kube-system -o wide
-```
-
-But if other pods fail, it is likely a different issue. Perform normal Kubernetes troubleshooting. For example:
-
-```bash
-kubectl describe pod kube-scheduler-ip-10-0-1-20.eu-west-1.compute.internal -n kube-system | tail -15
-```
-
-## Calico components
-
-### View Calico CNI configuration on a node
-
-```bash
-cat /etc/cni/net.d/10-calico.conflist
-```
-
-### Verify calicoctl matches cluster
-
-The calicoctl version must match the version and type of your cluster.
-
-```bash
-calicoctl version
-```
-
-For syntax:
-
-```bash
-calicoctl version --help
-```
-
-### Check Tigera operator status
-
-```bash
-kubectl get tigerastatus
-```
-
-```
-NAME     AVAILABLE   PROGRESSING   DEGRADED   SINCE
-calico   True        False        False       27h
-```
-
-### Check if operator pod is running
-
-```bash
-kubectl get pod -n tigera-operator
-```
-
-### View calico nodes
-
-```bash
-kubectl get pod -n calico-system -o wide
-```
-
-### View {{prodname}} installation parameters
-
-```bash
-kubectl get installation -o yaml
-```
-
-```yaml
-apiVersion: v1
-items:
-  - apiVersion: operator.tigera.io/v1
-    kind: Installation
-    metadata:
-      name: default
-    spec:
-      calicoNetwork:
-        bgp: Enabled
-        hostPorts: Enabled
-        ipPools:
-          - blockSize: 26
-            cidr: 10.48.0.0/16
-            encapsulation: VXLANCrossSubnet
-            natOutgoing: Enabled
-            nodeSelector: all()
-        multiInterfaceMode: None
-        nodeAddressAutodetectionV4:
-          firstFound: true
-      cni:
-        ipam:
-          type: Calico
-        type: Calico
-```
-
-### Run commands across multiple nodes
-
-```bash
-export THE_COMMAND_TO_RUN=date && for calinode in `kubectl get pod -o wide -n calico-system | grep calico-node | awk '{print $1}'`; do echo $calinode; echo "-----"; kubectl exec -n calico-system $calinode -- $THE_COMMAND_TO_RUN; printf "\n"; done
-```
-
-```
-calico-node-87lpx
------
-Defaulted container "calico-node" out of: calico-node, flexvol-driver (init), install-cni (init)
-Thu Apr 28 13:48:06 UTC 2022
-
-calico-node-x5fmm
------
-Defaulted container "calico-node" out of: calico-node, flexvol-driver (init), install-cni (init)
-Thu Apr 28 13:48:07 UTC 2022
-
-```
-
-### View pod info
-
-```bash
-kubectl describe pods <pod_name> -n <namespace>
-```
-
-```bash
-kubectl describe pods busybox -n default
-```
-
-```
-Events:
-  Type    Reason     Age   From                   Message
-  ----    ------     ----  ----                   -------
-  Normal  Scheduled  21s   default-scheduler      Successfully assigned default/busybox to ip-10-0-0-11
-  Normal  Pulling    20s   kubelet, ip-10-0-0-11  Pulling image "busybox"
-  Normal  Pulled     19s   kubelet, ip-10-0-0-11  Successfully pulled image "busybox"
-  Normal  Created    19s   kubelet, ip-10-0-0-11  Created container busybox
-  Normal  Started    18s   kubelet, ip-10-0-0-11  Started container busybox
-```
-
-### View logs of a pod
-
-```bash
-kubectl logs <pod_name> -n <namespace>
-```
-
-```bash
-kubectl logs busybox -n default
-```
-
-### View kubelet logs
-
-```bash
-journalctl -u kubelet
-```
-
-## Routing
-
-### Verify routing table on the node
-
-```bash
-ip route
-```
-
-```
-default via 10.0.0.1 dev eth0 proto dhcp src 10.0.0.10 metric 100
-10.0.0.0/24 dev eth0 proto kernel scope link src 10.0.0.10
-10.0.0.1 dev eth0 proto dhcp scope link src 10.0.0.10 metric 100
-10.48.66.128/26 via 10.0.0.12 dev eth0 proto 80 onlink
-10.48.231.0/26 via 10.0.0.11 dev eth0 proto 80 onlink
-172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 linkdown
-```
-
-### Verify BGP peer status
-
-```bash
-sudo calicoctl node status
-```
-
-```
-Calico process is running.
-
-IPv4 BGP status
-+--------------+-------------------+-------+------------+-------------+
-| PEER ADDRESS |     PEER TYPE     | STATE |   SINCE    |    INFO     |
-+--------------+-------------------+-------+------------+-------------+
-| 10.0.0.12    | node-to-node mesh | up    | 2020-11-25 | Established |
-| 10.0.0.11    | node-to-node mesh | up    | 2020-11-25 | Established |
-+--------------+-------------------+-------+------------+-------------+
-```
-
-### Verify overlay configuration
-
-```bash
-kubectl get ippools default-ipv4-ippool -o yaml
-```
-
-```yaml
-
----
-spec:
-  ipipMode: Always
-  vxlanMode: Never
-```
-
-### Verify BGP learned routes
-
-```bash
-ip r | grep bird
-```
-
-```
-192.168.66.128/26 via 10.0.0.12 dev tunl0 proto bird onlink
-192.168.180.192/26 via 10.0.0.10 dev tunl0 proto bird onlink
-blackhole 192.168.231.0/26 proto bird
-```
-
-### Verify BIRD routing table
-
-**Note**: The BIRD routing table gets pushed to the node routing tables.
-
-```bash
-kubectl exec -it -n calico-system calico-node-8cfc8 -- /bin/bash
-```
-
-```
-[root@ip-10-0-0-11 /]# birdcl
-BIRD v0.3.3+birdv1.6.8 ready.
-bird> show route
-0.0.0.0/0 via 10.0.0.1 on eth0 [kernel1 18:13:33] * (10)
-10.0.0.0/24 dev eth0 [direct1 18:13:32] * (240)
-10.0.0.1/32 dev eth0 [kernel1 18:13:33] * (10)
-10.48.231.2/32 dev calieb874a8ef0b [kernel1 18:13:41] * (10)
-10.48.231.1/32 dev caliaeaa173109d [kernel1 18:13:35] * (10)
-10.48.231.0/26 blackhole [static1 18:13:32] * (200)
-10.48.231.0/32 dev vxlan.calico [direct1 18:13:32] * (240)
-10.48.180.192/26 via 10.0.0.10 on eth0 [Mesh_10_0_0_10 18:13:34] * (100/0) [i]
-                 via 10.0.0.10 on eth0 [Mesh_10_0_0_12 18:13:41 from 10.0.0.12] (100/0) [i]
-                 via 10.0.0.10 on eth0 [kernel1 18:13:33] (10)
-10.48.66.128/26 via 10.0.0.12 on eth0 [Mesh_10_0_0_10 18:13:36 from 10.0.0.10] * (100/0) [i]
-                via 10.0.0.12 on eth0 [Mesh_10_0_0_12 18:13:41] (100/0) [i]
-                via 10.0.0.12 on eth0 [kernel1 18:13:36] (10)
-```
-
-### Capture traffic
-
-For example,
-
-```bash
-sudo tcpdump -i calicofac0017c3 icmp
-```
-
-## Network policy
-
-### Verify existing Kubernetes network policies
-
-```bash
-kubectl get networkpolicy --all-namespaces
-```
-
-```
-NAMESPACE   NAME             POD-SELECTOR   AGE
-client      allow-ui         <none>         20m
-client      default-deny     <none>         4h51m
-stars       allow-ui         <none>         20m
-stars       backend-policy   role=backend   20m
-stars       default-deny     <none>         4h51m
-```
-
-### Verify existing {{prodname}} network policies
-
-```bash
-calicoctl get networkpolicy --all-namespaces -o wide
-```
-
-```
-NAMESPACE     NAME                         ORDER   SELECTOR
-calico-demo   allow-busybox                50      app == 'porter'
-client        knp.default.allow-ui         1000    projectcalico.org/orchestrator == 'k8s'
-client        knp.default.default-deny     1000    projectcalico.org/orchestrator == 'k8s'
-stars         knp.default.allow-ui         1000    projectcalico.org/orchestrator == 'k8s'
-stars         knp.default.backend-policy   1000    projectcalico.org/orchestrator == 'k8s'
-stars         knp.default.default-deny     1000    projectcalico.org/orchestrator == 'k8s'
-```
-
-### Verify existing {{prodname}} global network policies
-
-```bash
-calicoctl get globalnetworkpolicy -o wide
-```
-
-```
-NAME                  ORDER   SELECTOR
-default-app-policy    100
-egress-lockdown       600
-default-node-policy   100     has(kubernetes.io/hostname)
-nodeport-policy       100     has(kubernetes.io/hostname)
-```
-
-### Check policy selectors and order
-
-For example,
-
-```bash
-calicoctl get np -n yaobank -o wide
-```
-
-If the selectors should match, check the endpoint IP and the node where it is running.
-For example,
-
-```bash
-kubectl get pod -l app=customer -n yaobank
-```
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx
deleted file mode 100644
index be96f90ab4..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/component-logs.mdx
+++ /dev/null
@@ -1,121 +0,0 @@
----
-description: Where to find component logs.
----
-
-# Component logs
-
-## Big picture
-
-View and collect {{prodname}} logs.
-
-## Value
-
-It is useful to view logs to monitor component health and diagnose potential issues.
-
-## Concepts
-
-### {{nodecontainer}} logs
-
-The {{nodecontainer}} logs contain log output from the following subcomponents:
-
-- Per-node startup logic
-- BGP agent
-- Felix policy agent
-
-Components log either to disk within `/var/log/calico`, to stdout, or both.
-
-For components that log to disk, files are automatically rotated, and by default 10 files of 1MB each are kept. The current log file is called `current`, and rotated files have `@` followed by a timestamp detailing when the file was rotated in [tai64n](http://cr.yp.to/libtai/tai64.html#tai64n) format.
-
-## How to
-
-## View logs for a {{nodecontainer}} instance
-
-You can view logs for a node using the `kubectl logs` command. This will show logs for all subcomponents of the given node.
-
-For example:
-
-```
-kubectl logs -n calico-system calico-node-xxxx
-```
-
-## View logs from the CNI plugin
-
-CNI plugin logs are not available through kubectl; they are instead logged both to the host machine's disk and to stderr.
-
-By default, these logs can be found at `/var/log/calico/cni/` on the host machine.
-
-The container runtime may also display the CNI plugin logs within its own log output.
-
-## Configure BGP agent log level
-
-BGP log level is configured via the [BGPConfiguration](../../reference/resources/bgpconfig.mdx) API, and can be one of the following values:
-
-- `Debug`: enables "debug all" logging for BIRD. The most verbose logging level.
-- `Info`: enables logging for protocol state changes. This is the default log level.
-- `Warning`: disables BIRD logging, emits warning level configuration logs only.
-- `Error`: disables BIRD logging, emits error level configuration logs only.
-- `Fatal`: disables BIRD logging, emits fatal level configuration logs only.
-
-To modify the BGP log level:
-
-1. Get the current bgpconfig settings.
-
-   ```bash
-   kubectl get bgpconfig -o yaml > bgp.yaml
-   ```
-
-1. Modify logSeverityScreen to the desired value.
-
-   ```bash
-   vim bgp.yaml
-   ```
-
-   :::tip
-
-   For a global change set the name to "default".
-   For a node-specific change set the name to the node name prefixed with "node.", e.g., "node.node-1".
-
-   :::
-
-1. Replace the current bgpconfig settings.
-
-   ```bash
-   kubectl replace -f bgp.yaml
-   ```
-
-## Configure Felix log level
-
-Felix log level is configured via the [FelixConfiguration](../../reference/resources/felixconfig.mdx) API, and can be one of the following values:
-
-- `Debug`: The most verbose logging level - for development and debugging.
-- `Info`: The default log level. Shows important state changes.
-- `Warning`: Shows warnings only.
-- `Error`: Shows errors only.
-- `Fatal`: Shows fatal errors only.
-
-To modify Felix's log level:
-
-1. Get the current felixconfig settings.
-
-   ```bash
-   kubectl get felixconfig -o yaml > felixconfig.yaml
-   ```
-
-1. Modify logSeverityScreen to the desired value.
-
-   ```bash
-   vim felixconfig.yaml
-   ```
-
-   :::tip
-
-   For a global change set the name to "default".
-   For a node-specific change set the name to the node name prefixed with "node.", e.g., "node.node-1".
-
-   :::
-
-1. Replace the current felixconfig settings.
-
-   ```bash
-   kubectl replace -f felixconfig.yaml
-   ```
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx
deleted file mode 100644
index fb1ae8c95e..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Troubleshooting, logs, and diagnostics.
-hide_table_of_contents: true
----
-
-# Troubleshoot
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx
deleted file mode 100644
index bdc9c39295..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/troubleshooting.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
----
-description: View logs and diagnostics, common issues, and where to report issues in github.
----
-
-# Troubleshooting and diagnostics
-
-## Logs and diagnostics
-
-To collect diagnostics, run the `calicoctl` command line tool with superuser privileges. For example:
-
-```bash
-sudo calicoctl node diags
-```
-
-To view logs, use the following command:
-
-`kubectl logs -n calico-system <pod_name>`
-
-To view debug logs on some Calico components, set the `LogSeverityScreen` through the associated environment variable.
-
-To report a problem, please [open an issue in GitHub](https://github.com/projectcalico/calico/issues).
-
-### Check BGP peer status
-
-If you have connectivity between containers on the same host, and between
-containers and the Internet, but not between containers on different hosts, it
-probably indicates a problem in your BGP configuration.
-
-Check the output of `calicoctl node status` on each host. It should include output like this:
-
-```
-Calico process is running.
-
-IPv4 BGP status
-+--------------+-------------------+-------+----------+-------------+
-| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
-+--------------+-------------------+-------+----------+-------------+
-| 172.17.8.102 | node-to-node mesh | up    | 23:30:04 | Established |
-+--------------+-------------------+-------+----------+-------------+
-
-IPv6 BGP status
-No IPv6 peers found.
-```
-
-Alternatively, you can create a [`CalicoNodeStatus` resource](../../reference/resources/caliconodestatus.mdx) to get BGP session status for the node.
-
-If you do not see this, please check the following.
-
-- Make sure there is IP connectivity between your hosts.
-
-- Make sure your network allows the requisite BGP traffic on TCP port 179.
-
-### Configure NetworkManager
-
-Configure [NetworkManager](https://help.ubuntu.com/community/NetworkManager) before
-attempting to use {{prodname}} networking.
-
-NetworkManager manipulates the routing table for interfaces in the default network
-namespace where {{prodname}} veth pairs are anchored for connections to containers.
-This can interfere with the {{prodname}} agent's ability to route correctly.
-
-Create the following configuration file at `/etc/NetworkManager/conf.d/calico.conf` to prevent
-NetworkManager from interfering with the interfaces:
-
-```conf
-[keyfile]
-unmanaged-devices=interface-name:cali*;interface-name:tunl*;interface-name:vxlan.calico;interface-name:vxlan-v6.calico;interface-name:wireguard.cali;interface-name:wg-v6.cali
-```
-
-## Errors when running sudo calicoctl
-
-If you use `sudo` for commands, remember that your environment variables are not transferred to the `sudo` environment. You must run `sudo` with the `-E` flag to include your environment variables:

-```bash
-sudo -E calicoctl node diags
-```
-
-or you can set environment variables for `sudo` commands like this:
-
-```bash
-sudo ETCD_ENDPOINTS=http://172.25.0.1:2379 calicoctl node run
-```
-
-Also be aware that connection information can be specified as a config file rather than using environment variables. See [Installing calicoctl](../calicoctl/install.mdx)
-for details.
-
-## Error: {{nodecontainer}} is not ready: BIRD is not ready: BGP not established with 10.0.0.1
-
-In most cases, this "unready" status error in Kubernetes means that a particular peer is unreachable in the cluster. Check that BGP connectivity between the two peers is allowed in the environment.
-
-This error can also occur if inactive Node resources are configured for node-to-node mesh. To fix this, [decommission the stale nodes](../decommissioning-a-node.mdx).
-
-## Linux conntrack table is out of space
-
-A common problem on Linux systems is running out of space in the conntrack table, which can cause poor iptables performance. This can
-happen if you run a lot of workloads on a given host, or if your workloads create a lot of TCP connections or bidirectional UDP streams. To avoid this problem, we recommend increasing the conntrack table size using the following commands:
-
-```bash
-sysctl -w net.netfilter.nf_conntrack_max=1000000
-echo "net.netfilter.nf_conntrack_max=1000000" >> /etc/sysctl.conf
-```
diff --git a/calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx b/calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx
deleted file mode 100644
index cd643816fe..0000000000
--- a/calico_versioned_docs/version-3.25/operations/troubleshoot/vpp.mdx
+++ /dev/null
@@ -1,424 +0,0 @@
----
-description: Specific troubleshooting steps for the VPP dataplane.
----
-
-# VPP dataplane troubleshooting
-
-## Big picture
-
-This page describes the troubleshooting steps for the [VPP dataplane](../../getting-started/kubernetes/vpp/getting-started.mdx). If you did not configure the VPP dataplane, this page is not for you!
-
-If you're encountering issues with the VPP dataplane, feel free to reach out to us either on the [#vpp channel](https://calicousers.slack.com/archives/C017220EXU1) on the {{prodname}} slack, or by opening a new issue in [GitHub](https://github.com/projectcalico/vpp-dataplane/issues).
-
-## Installing calivppctl
-
-`calivppctl` is a helper bash script shipped alongside vpp container images. It can be installed onto your host using any of the following methods, and it helps with collecting logs and debugging a running cluster with the VPP dataplane installed.
-
-- With curl
-
-```bash
-curl https://raw.githubusercontent.com/projectcalico/vpp-dataplane/{{vppbranch}}/test/scripts/vppdev.sh \
-  | tee /usr/bin/calivppctl
-chmod +x /usr/bin/calivppctl
-```
-
-- With docker (and a cluster with calico-vpp running)
-
-```bash
-vppcontainer=$(docker ps | grep vpp_calico-vpp | awk '{ print $1 }')
-docker cp ${vppcontainer}:/usr/bin/calivppctl /usr/bin/calivppctl
-```
-
-- With kubectl (and a cluster with calico-vpp running)
-
-```bash
-vpppod=$(kubectl -n calico-vpp-dataplane get pods -o wide | grep calico-vpp-node- | awk '{ print $1 }' | head -1)
-kubectl -n calico-vpp-dataplane exec -it ${vpppod} -c vpp -- cat /usr/bin/calivppctl | tee /usr/bin/calivppctl > /dev/null
-chmod +x /usr/bin/calivppctl
-```
-
-## Troubleshooting
-
-### Kubernetes Cluster
-
-First you need to make sure Kubernetes is up and running.
-
-- `service kubelet status` should give you a first hint.
-- Issues should be reported in the kubelet logs, which you can check with this command if you are using systemd: `journalctl -u kubelet -r -n200`
-
-:::note
-
-Kubernetes does not run with swap enabled.
-
-:::
-
-### Starting the calico-vpp-node DaemonSet
-
-Once the cluster has started correctly, the next source of issues is the DaemonSet configuration.
-It is best to start by inspecting the pods: are they running correctly?
-Configuration issues (available hugepages, memory, ...) are usually reported here:
-
-```bash
-kubectl -n calico-vpp-dataplane describe pod/calico-vpp-node-XXXXX
-```
-
-:::note
-
-If at this point you don't have enough hugepages, you'll have to allocate them and then restart kubelet
-for the change to take effect (using, for instance, `service kubelet restart`).
-
-:::
-
-### Having VPP up and running
-
-Once the pods don't report any issue, the pods should have started. There are two
-containers for each node: VPP, which starts the vpp process and sets up connectivity,
-and the agent, which handles pod connectivity, service load balancing, BGP, policies, etc.
-
-First check that VPP is running correctly. If the connectivity configuration or interface naming
-is not correct, it will be reported here. Once this is running, you should be able to ping your other nodes through VPP.
-
-```bash
-# Print VPP's log: basic connectivity and NIC configuration
-calivppctl log -vpp myk8node1
-```
-
-Then you can check for any issues reported by the Agent (e.g. a BGP listen issue
-if the port is already taken, or missing configuration pieces). If this doesn't
-show any errors, you should be able to `nslookup kubernetes.default` from pods.
-
-```bash
-# Print the logs for the {{prodname}} VPP dataplane agent, programming serviceIPs, BGP, ...
-calivppctl log -agent myk8node1
-```
-
-If all this doesn't play well, you can always use the export to generate an export.tar.gz
-bundle and ask for help on the [#vpp channel](https://calicousers.slack.com/archives/C017220EXU1).
-
-```bash
-calivppctl export
-```
-
-## Accessing the VPP cli
-
-For further debugging, tracing packets, and inspecting VPP's internals, you can
-get a vpp shell using the following:
-
-```bash
-calivppctl vppctl myk8node1
-```
-
-### Listing interfaces and basics
-
-To list existing interfaces and basic counters, use:
-
-```
-vpp# show int
-vpp# show int addr
-```
-
-To get more insights on the main interface (e.g.
-if you're using dpdk), you can check
-for errors and drops in:
-
-```
-vpp# show hardware-interfaces
-```
-
-Other places to look for errors:
-
-```
-vpp# show log      # VPP startup log
-vpp# show err      # Prints out packet counters (not always actual errors, but includes drops)
-vpp# show buffers  # You should have non-zero free buffers, otherwise traffic won't flow
-```
-
-## Tracing packets
-
-### Internal network layout
-
-For starters, here is a small schematic of what the network looks like:
-![k8-calico-vpp](/img/calico/vpp-tracing-net.svg)
-
-Container interfaces are named `tun[0-9]+`. You can find which ones belong to which container as follows.
-
-```
-# Connect to vppctl
-$ calivppctl vppctl NODENAME
-
-# List interfaces
-vpp# show interface
-              Name               Idx    State  MTU (L3/IP4/IP6/MPLS)     Counter          Count
-avf-0/d8/a/0                      1      up          9000/0/0/0     tx packets                     2
-                                                                    tx bytes                     216
-local0                            0     down          0/0/0/0
-tap0                              2      up           0/0/0/0       rx packets                     9
-[...]
-tun3                              5      up           0/0/0/0       rx packets                     5
-                                                                    rx bytes                     431
-                                                                    tx packets                     5
-                                                                    tx bytes                     387
-                                                                    ip4                            5
-
-# Show the route for address 11.0.166.132
-vpp# show ip fib 11.0.166.132
-ipv4-VRF:0, fib_index:0, flow hash:[src dst sport dport symmetric ] epoch:0 flags:none locks:[adjacency:1, default-route:1, ]
-11.0.166.132/32 fib:0 index:19 locks:5
-  cnat refs:1 entry-flags:uRPF-exempt,interpose, src-flags:added,contributing,active, cover:-1 interpose:
-  [@0]: [4] cnat-client:[11.0.166.132] tr:0 sess:1
-  path-list:[26] locks:3 flags:shared, uPRF-list:24 len:1 itfs:[5, ]
-    path:[32] pl-index:26 ip4 weight=1 pref=0 attached-nexthop: oper-flags:resolved, cfg-flags:attached,
-      11.0.166.132 tun3 (p2p)
-      [@0]: ipv4 via 0.0.0.0 tun3: mtu:9000 next:7
-[...]
-
-# This one is behind `tun3`
-# If you want more info about this interface (name in Linux, queues, descriptors, ...)
-vpp# show tun tun3
-Interface: tun3 (ifindex 5)
-  name "eth0"
-  host-ns "/proc/17675/ns/net"
-  [...]
-```
-
-`tap0` is the interface providing connectivity to the host, using the original interface name on the Linux side (use `show tap tap0` and `show ip punt redirect`).
-
-### Capturing traffic inside the cluster
-
-Let's take the case of two pods talking to each other in your cluster (see the schema above).
-You might want to inspect the traffic at three different locations:
-
-- as it exits the pod (in Linux inside the first pod)
-- as it goes through VPP
-- as it is received in the second pod (in Linux again)
-
-We cover the three cases: first inside VPP (depending on where your traffic is coming from: a pod or outside your host),
-then inside your pods (usually with tcpdump).
-
-### Traffic capture inside VPP
-
-#### Traffic from a pod
-
-The following snippet will allow you to capture all traffic coming from containers on a particular node, grep for a specific packet,
-and see what happened to it.
-
-```bash
-# Make sure that the trace buffer is clean in VPP
-calivppctl vppctl NODENAME clear trace
-# Add a trace from the virtio-input input-node
-calivppctl vppctl NODENAME trace add virtio-input 500
-# generate some traffic
-calivppctl vppctl NODENAME show trace max 500 > somefile
-# Grep for your IPs
-cat somefile | grep '1.2.3.4 -> 5.6.7.8' -A40 -B40
-```
-
-The output looks quite cumbersome at first, as it contains the whole path of a packet through VPP, from reception to tx.
-
-```
-vpp# show trace
-Packet 1
-
-00:09:46:518858: virtio-input
-  # This packet has been received on the interface number #2 (column Idx in `show int`)
-  # and is 688 bytes long
-  virtio: hw_if_index 2 next-index 1 vring 0 len 688
-    hdr: flags 0x00 gso_type 0x00 hdr_len 0 gso_size 0 csum_start 0 csum_offset 0 num_buffers 1
-00:09:46:518866: ip4-input
-  # We read the TCP header, addresses and ports
-  TCP: 20.0.0.1 -> 11.0.166.133
-    tos 0x00, ttl 64, length 688, checksum 0x1bc5 dscp CS0 ecn NON_ECN
-    fragment id 0x56fd, flags DONT_FRAGMENT
-  TCP: 6443 -> 34112
-    seq. 0xa1f93599 ack 0x818eb1c1
-    flags 0x18 PSH ACK, tcp header: 32 bytes
-    window 502, checksum 0x00b7
-00:09:46:518870: ip4-lookup
-  fib 0 dpo-idx 5 flow hash: 0x00000000
-  TCP: 20.0.0.1 -> 11.0.166.133
-    tos 0x00, ttl 64, length 688, checksum 0x1bc5 dscp CS0 ecn NON_ECN
-    fragment id 0x56fd, flags DONT_FRAGMENT
-  TCP: 6443 -> 34112
-    seq. 0xa1f93599 ack 0x818eb1c1
-    flags 0x18 PSH ACK, tcp header: 32 bytes
-    window 502, checksum 0x00b7
-00:09:46:518873: ip4-cnat-tx
-  # We need to do some NATing as it's Kubernetes
-  found: session:[20.0.0.1;6443 -> 11.0.166.133;34112, TCP] => 11.96.0.1;443 -> 11.0.166.133;34112 lb:-1 age:4190
-00:09:46:518879: ip4-rewrite
-  # We rewrite the IP packet - MAC addresses only when coming / going to a PHY, as tun interfaces are L3-only
-  tx_sw_if_index 6 dpo-idx 7 : ipv4 via 0.0.0.0 tun4: mtu:9000 next:8 flow hash: 0x00000000
-  00000000: 450002b056fd40003f0625650b6000010b00a68501bb8540a1f93599818eb1c1
-  00000020: 801801f620c700000101080a3f906c98fbaaba031703030277413d39
-  # Output happens on the interface `tun4`
-00:09:46:518880: tun4-output
-  tun4
-  00000000: 450002b056fd40003f0625650b6000010b00a68501bb8540a1f93599818eb1c1
-  00000020: 801801f620c700000101080a3f906c98fbaaba031703030277413d39b97817c1
-  00000040: 41392fdbe0e9d4886849851476cdb8986362ee2f789bfefd8a5c106c898d1309
-  00000060: 4f8f8cb89159d99e986813a48d91334930eb5eb10ca4248c
-00:09:46:518881: tun4-tx
-  buffer 0x24cf615: current data 0, length 688, buffer-pool 1, ref-count 1, totlen-nifb 0, trace handle 0x1000000
-  ipv4 tcp hdr-sz 52 l2-hdr-offset 0 l3-hdr-offset 0 l4-hdr-offset 20 l4-hdr-sz 32
-  0x0b60: 40:00:3f:06:25:65 -> 45:00:02:b0:56:fd
-
-Packet 2
-[...]
-```
-
-#### Traffic from the phy
-
-If you want to capture traffic coming from the physical NIC, you should use `trace add` but with a different source node, such as `dpdk-input`, `af-packet-input`, `af_xdp-input`, or `avf-input` instead of `virtio-input`.
-
-`show run` should give you a hint of the `X-input` node you want to trace from.
-
-```
-vpp# show run
-Thread 1 vpp_wk_0 (lcore 25)
-Time 1.9, 10 sec internal node vector rate 1.05 loops/sec 1074819.68
-  vector rates in 7.5356e0, out 7.5356e0, drop 0.0000e0, punt 0.0000e0
-             Name                 State         Calls          Vectors        Suspends         Clocks       Vectors/Call
-avf-input                        polling            2233530               0               0          8.24e1            0.00
-ip4-cnat-snat                    active                   1               1               0          5.35e3            1.00
-ip4-cnat-tx                      active                  14              15               0          1.18e3            1.07
-[...]
-
-# Here we seem to want to use: trace add avf-input 200
-```
-
-Same as with traffic from a container, you can use:
-
-```bash
-# Make sure that the trace buffer is clean in VPP
-calivppctl vppctl NODENAME clear trace
-# Add a trace from the avf-input input-node
-calivppctl vppctl NODENAME trace add avf-input 500
-# generate some traffic
-calivppctl vppctl NODENAME show trace max 500 > somefile
-# Grep for your IPs
-cat somefile | grep '1.2.3.4 -> 5.6.7.8' -A40 -B40
-```
-
-#### With Wireshark
-
-As an alternative to the trace, you can take a capture and analyze it in Wireshark.
-You can do this with:
-
-```
-vpp# pcap dispatch trace on max 1000 file vppcapture buffer-trace dpdk-input 1000
-vpp# pcap dispatch trace off
-```
-
-This will generate a file named `/tmp/vppcapture`
-
-Then on your host run:
-
-```bash
-calivppctl sh vpp NODENAME
-root@server:~# mv /tmp/vppcapture /var/lib/vpp/
-root@server:~# exit
-# The file should now be at /var/lib/vpp/vppcapture on your host 'NODENAME'
-```
-
-You can then `scp NODENAME:/var/lib/vpp/vppcapture .` on your machine and open it with Wireshark.
-[More info about this here](https://haryachyy.wordpress.com/2019/09/29/learning-vpp-trace-with-wireshark/)
-
-### Traffic received in the pods
-
-To inspect traffic actually received by the pods (if `tcpdump` is installed in the pod), simply run `tcpdump -ni eth0` inside the pod. If tcpdump is not available in the pod, here are two options to still be able to capture pod traffic:
-
-#### Tcpdump is available on the host
-
-Provided that you have `tcpdump` installed on the host, you can use `nsenter` to attach to the pod's network namespace and use the host's `tcpdump` on the container's interface.
-
-This works on docker as follows:
-
-```bash
-# Find the container ID you want to inspect
-$ docker ps
-CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
-4c01db0b339c        ubuntu:12.04        bash                17 seconds ago      Up 16 seconds       3300-3310/tcp       webapp
-
-# Get the container PID out of it
-$ docker inspect --format '{{ .State.Pid }}' 4c01db0b339c
-12345
-
-# Attach
-$ nsenter -t 12345 -n bash
-
-```
-
-#### No tcpdump, but we have python!
-
-Open an AF_PACKET socket in python with the following code
-and run it attached to the pod's network namespace as previously.
-
-```python
-#!/usr/bin/env python
-from socket import *
-from struct import unpack
-
-IFNAME = "eth0"
-N_PKT = 50
-MTU = 1500
-
-# 0x0800 is ETH_P_IP; socket() expects the protocol in network byte order
-sock = socket(AF_PACKET, SOCK_DGRAM, htons(0x0800))
-sock.bind((IFNAME, 0x0800))
-for _ in range(N_PKT):
-    data = sock.recvfrom(MTU, 0)[0]
-    # Parse the IPv4 header (assumes a 20-byte header) and UDP-style L4 fields
-    src_addr = inet_ntop(AF_INET, data[12:16])
-    dst_addr = inet_ntop(AF_INET, data[16:20])
-    src_port, = unpack("!H", data[20:22])
-    dst_port, = unpack("!H", data[22:24])
-    data_len, = unpack("!H", data[24:26])
-    cksum, = unpack("!H", data[26:28])
-
-    print("%s:%d -> %s:%d len %d cs %d" % (src_addr, src_port, dst_addr, dst_port, data_len, cksum))
-```
-
-This requires privileges and thus is usually easier to run from the host. From the host, you can use `echo "the python blob above" | nsenter -t <pid> -n python` to execute this code.
-
-### Traffic to the kubelet agent
-
-As the kubelet agent runs directly on the host, outside any pod network namespace, pods talking to it (e.g. coredns resolvers) go through a specific path. Packets destined to it will be caught by VPP's punt mechanism, and will be forwarded to the host through a tap interface which will have the same name as the original interface in Linux.
-
-To debug traffic within VPP, use the trace & check that traffic is correctly punted to the tap0 interface.
-
-On the host, you can use `tcpdump` normally to check the traffic.
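-
-For example, to confirm on the host that punted traffic from pods is arriving, you can listen on the host-side interface (the interface name here is illustrative; kubelet's API typically listens on TCP port 10250):
-
-```bash
-sudo tcpdump -ni eth0 tcp port 10250
-```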
-
-## Crashes & coredumps
-
-To instruct vpp to leave a coredump in the event of a crash, you can pass the `CALICOVPP_CORE_PATTERN` environment variable to the vpp container:
-
-```yaml
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
-  name: calico-vpp-node
-
----
-- name: vpp
-  env:
-    - name: CALICOVPP_CORE_PATTERN
-      value: '/home/hostuser/vppcore.%e.%p'
-  volumeMounts:
-    - name: userhome
-      mountPath: /home/hostuser
-
----
-volumes:
-  - name: userhome
-    hostPath:
-      path: ${SOME_DIRECTORY}
-```
-
-This will generate a `vppcore.vpp_main.<pid>` file in `${SOME_DIRECTORY}` if vpp aborts unexpectedly. If you encounter this situation, please note the exact version of the vpp image that generated the corefile (using the image hash) to facilitate further troubleshooting.
-
-To explore it run:
-
-```bash
-docker run -it --entrypoint=bash -v ${SOME_DIRECTORY}/vppcore.vpp_main.12345:/root/vppcore calicovpp/vpp:VERSION
-# You should have a shell inside the vpp container
-apt update && apt install -y gdb
-gdb vpp ./vppcore
-```
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/index.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/index.mdx
deleted file mode 100644
index 20bc2723b1..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Upgrade to a newer version of Calico.
-hide_table_of_contents: true
----
-
-# Upgrade
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx
deleted file mode 100644
index 0f93b9224a..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/kubernetes-upgrade.mdx
+++ /dev/null
@@ -1,267 +0,0 @@
----
-description: Upgrade to a newer version of Calico for Kubernetes.
----
-
-# Upgrade Calico on Kubernetes
-
-## About upgrading {{prodname}}
-
-This page describes how to upgrade to {{version}} from {{prodname}} v3.0 or later. The
-procedure varies by datastore type and install method.
-
-If you are using {{prodname}} in etcd mode on a Kubernetes cluster, we recommend upgrading to the Kubernetes API datastore [as discussed here](../datastore-migration.mdx).
-
-If you have installed {{prodname}} using the `calico.yaml` manifest, we recommend upgrading to the {{prodname}} operator, [as discussed here](../operator-migration.mdx).
-
-- [Upgrading an installation that was installed using Helm](#upgrading-an-installation-that-was-installed-using-helm)
-
-- [Upgrading an installation that uses the operator](#upgrading-an-installation-that-uses-the-operator)
-
-- [Upgrading an installation that uses manifests and the Kubernetes API datastore](#upgrading-an-installation-that-uses-manifests-and-the-kubernetes-api-datastore)
-
-- [Upgrading an installation that uses an etcd datastore](#upgrading-an-installation-that-uses-an-etcd-datastore)
-
-:::note
-
-Do not use older versions of `calicoctl` after the upgrade.
-This may result in unexpected behavior and data inconsistencies.
-
-:::
-
-
-
-## Upgrading an installation that was installed using Helm
-
-Prior to release v3.23, the Calico helm chart itself deployed the `tigera-operator` namespace and required that the helm release was
-installed in the `default` namespace.
Newer releases properly defer creation of the `tigera-operator` namespace to the user and allow installation of the chart into the `tigera-operator` namespace.

When upgrading from Calico v3.22 or earlier to Calico v3.23 or later, you must complete the following steps to migrate ownership of the Helm resources to the new chart location.

### Upgrade from Calico versions prior to v3.23.0

1. Patch existing resources so that the new chart can assume ownership.

   ```bash
   kubectl patch installation default --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   kubectl patch apiserver default --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   kubectl patch podsecuritypolicy tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   kubectl patch -n tigera-operator deployment tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   kubectl patch -n tigera-operator serviceaccount tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   kubectl patch clusterrole tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   kubectl patch clusterrolebinding tigera-operator tigera-operator --type=merge -p '{"metadata": {"annotations": {"meta.helm.sh/release-namespace": "tigera-operator"}}}'
   ```

1. Apply the {{version}} CRDs:

   ```bash
   kubectl apply --server-side --force-conflicts -f {{manifestsUrl}}/manifests/operator-crds.yaml
   ```

1. Install the Helm chart in the `tigera-operator` namespace.

   ```bash
   helm install {{prodnamedash}} projectcalico/tigera-operator --version {{releaseTitle}} --namespace tigera-operator
   ```

1. Once the install has succeeded, you can delete any old releases in the `default` namespace.

   ```bash
   kubectl delete secret -n default -l name=calico,owner=helm --dry-run
   ```

:::note

The above command uses `--dry-run` to avoid making changes to your cluster. We recommend reviewing the output and then re-running the command without `--dry-run` to commit to the changes.

:::

### All other upgrades

1. Apply the {{version}} CRDs:

   ```bash
   kubectl apply --server-side --force-conflicts -f {{manifestsUrl}}/manifests/operator-crds.yaml
   ```

1. Run the Helm upgrade:

   ```bash
   helm upgrade {{prodnamedash}} projectcalico/tigera-operator
   ```

## Upgrading an installation that uses the operator

1. Download the {{version}} operator manifest.

   ```bash
   curl {{manifestsUrl}}/manifests/tigera-operator.yaml -O
   ```

1. Use the following command to initiate an upgrade.

   ```bash
   kubectl apply --server-side --force-conflicts -f tigera-operator.yaml
   ```

## Upgrading an installation that uses manifests and the Kubernetes API datastore

1. Download the {{version}} manifest that corresponds to your original installation method.
   **{{prodname}} for policy and networking**

   ```bash
   curl {{manifestsUrl}}/manifests/calico.yaml -o upgrade.yaml
   ```

   **{{prodname}} for policy and flannel for networking**

   ```bash
   curl {{manifestsUrl}}/manifests/canal.yaml -o upgrade.yaml
   ```

   **{{prodname}} for policy (advanced)**

   ```bash
   curl {{manifestsUrl}}/manifests/calico-policy-only.yaml -o upgrade.yaml
   ```

   :::note

   If you manually modified the manifest, you must manually apply the same changes to the downloaded manifest.

   :::

1. Use the following command to initiate a rolling update.

   ```bash
   kubectl apply --server-side --force-conflicts -f upgrade.yaml
   ```

1. Watch the status of the upgrade as follows.

   ```bash
   watch kubectl get pods -n kube-system
   ```

   Verify that the status of all {{prodname}} pods is `Running`.

   ```
   {{noderunning}}-hvvg8   2/2   Running   0   3m
   {{noderunning}}-vm8kh   2/2   Running   0   3m
   {{noderunning}}-w92wk   2/2   Running   0   3m
   ```

1. Remove any existing `calicoctl` instances, [install the new `calicoctl`](../calicoctl/install.mdx) and [configure it to connect to your datastore](../calicoctl/configure/overview.mdx).

1. Use the following command to check the {{prodname}} version number.

   ```bash
   calicoctl version
   ```

   It should return a `Cluster Version` of `{{version}}.x`.

1. If you have [enabled application layer policy](../../network-policy/istio/app-layer-policy.mdx), follow [the instructions below](#upgrading-if-you-have-application-layer-policy-enabled) to complete your upgrade. Skip this if you are not using Istio with {{prodname}}.

1. If you were upgrading from a version of Calico prior to v3.14 and followed the pre-upgrade steps for host endpoints above, review traffic logs from the temporary policy, add any global network policies needed to allow traffic, and delete the temporary network policy **allow-all-upgrade**.

1. Congratulations! You have upgraded to {{prodname}} {{version}}.

## Upgrading an installation that uses an etcd datastore

1. Download the {{version}} manifest that corresponds to your original installation method.

   **{{prodname}} for policy and networking**

   ```bash
   curl {{manifestsUrl}}/manifests/calico-etcd.yaml -o upgrade.yaml
   ```

   **{{prodname}} for policy and flannel for networking**

   ```bash
   curl {{manifestsUrl}}/manifests/canal-etcd.yaml -o upgrade.yaml
   ```

   :::note

   You must manually apply the changes you made to the manifest during installation to the downloaded {{version}} manifest. At a minimum, you must set the `etcd_endpoints` value.

   :::

1. Use the following command to initiate a rolling update.

   ```bash
   kubectl apply --server-side --force-conflicts -f upgrade.yaml
   ```

1. Watch the status of the upgrade as follows.

   ```bash
   watch kubectl get pods -n kube-system
   ```

   Verify that the status of all {{prodname}} pods is `Running`.

   ```
   calico-kube-controllers-6d4b9d6b5b-wlkfj   1/1   Running   0   3m
   {{noderunning}}-hvvg8                      1/2   Running   0   3m
   {{noderunning}}-vm8kh                      1/2   Running   0   3m
   {{noderunning}}-w92wk                      1/2   Running   0   3m
   ```

   :::tip

   The {{noderunning}} pods will report `1/2` in the `READY` column, as shown.

   :::

1. Remove any existing `calicoctl` instances, [install the new `calicoctl`](../calicoctl/install.mdx) and [configure it to connect to your datastore](../calicoctl/configure/overview.mdx).

1. Use the following command to check the {{prodname}} version number.
   ```bash
   calicoctl version
   ```

   It should return a `Cluster Version` of `{{version}}`.

1. If you have [enabled application layer policy](../../network-policy/istio/app-layer-policy.mdx), follow [the instructions below](#upgrading-if-you-have-application-layer-policy-enabled) to complete your upgrade. Skip this if you are not using Istio with {{prodname}}.

1. If you were upgrading from a version of Calico prior to v3.14 and followed the pre-upgrade steps for host endpoints above, review traffic logs from the temporary policy, add any global network policies needed to allow traffic, and delete the temporary network policy **allow-all-upgrade**.

1. Congratulations! You have upgraded to {{prodname}} {{version}}.

## Upgrading if you have Application Layer Policy enabled

Dikastes is versioned the same as the rest of {{prodname}}, but an upgraded `calico-node` will still be able to work with a downlevel Dikastes, so you will not lose data plane connectivity during the upgrade. Once `calico-node` is upgraded, you can begin redeploying your service pods with the updated version of Dikastes.

If you have [enabled application layer policy](../../network-policy/istio/app-layer-policy.mdx), take the following steps to upgrade the Dikastes sidecars running in your application pods. Skip these steps if you are not using Istio with {{prodname}}.

1. Update the Istio sidecar injector template to use the new version of Dikastes. Replace `<your Istio version>` below with the full version string of your Istio install, for example `1.4.2`.

   ```bash
   kubectl apply -f {{manifestsUrl}}/manifests/alp/istio-inject-configmap-<your Istio version>.yaml
   ```

1. Once the new template is in place, newly created pods use the upgraded version of Dikastes. Perform a rolling update of each of your service deployments to get them on the new version of Dikastes.
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx
deleted file mode 100644
index ae7fe6ea10..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/openshift-upgrade.mdx
+++ /dev/null
@@ -1,38 +0,0 @@
---
description: Upgrade to a newer version of Calico for OpenShift.
---

# Upgrade Calico on OpenShift 4

## About upgrading {{prodname}}

This page describes how to upgrade to {{version}} for OpenShift 4 from an existing {{prodname}} cluster.

## Upgrading Calico on OpenShift 4

Make a manifests directory.

```bash
mkdir manifests
```

Apply the updated manifests.

```bash
oc apply -f manifests/
```

You can now monitor the upgrade progress with the following command:

```bash
watch oc get tigerastatus
```

If you were upgrading from a version of Calico prior to v3.14 and followed the pre-upgrade steps for host endpoints above, review traffic logs from the temporary policy, add any global network policies needed to allow traffic, and delete the temporary network policy **allow-all-upgrade**.
diff --git a/calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx b/calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx
deleted file mode 100644
index 9d75f97bd8..0000000000
--- a/calico_versioned_docs/version-3.25/operations/upgrading/openstack-upgrade.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
---
description: Upgrade to a newer version of Calico for OpenStack.
---

# Upgrade Calico on OpenStack

## {{prodname}} package update

This page describes how to upgrade to {{version}} from {{prodname}} v3.0 or later. The procedure varies by Linux distribution.

- [Upgrading an OpenStack cluster based on CentOS](#upgrading-an-openstack-cluster-based-on-centos)
- [Upgrading an OpenStack cluster based on Ubuntu](#upgrading-an-openstack-cluster-based-on-ubuntu)

:::note

Do not use older versions of `calicoctl` after the upgrade. This may result in unexpected behavior and data inconsistencies.

:::

## Upgrading an OpenStack cluster based on CentOS

1. On all nodes, change the location of the {{prodname}} packages to point to the {{version}} repo:

   ```
   sudo sed -i 's/calico-X.X/calico-Y.Y/g' /etc/yum.repos.d/calico.repo
   ```

   Replace `X.X` in the above command with the version you're upgrading from (must be v3.0 or later). Replace `Y.Y` with the version of the release you're upgrading to. Example: if you are upgrading from v3.1 to v3.5, replace `X.X` with `3.1` and replace `Y.Y` with `3.5`.

1. On all compute nodes, update packages:

   ```
   sudo yum update
   ```

   We recommend upgrading the whole distribution as shown here. In case you prefer to upgrade particular packages only, those needed for a {{prodname}} compute node are the following.

   - `calico-common`
   - `calico-compute`
   - `calico-dhcp-agent`
   - `calico-felix`
   - `dnsmasq`
   - `networking-calico`
   - `openstack-neutron`
   - `openstack-nova-api`
   - `openstack-nova-compute`
1. Use the following command on the compute nodes to confirm that Felix has upgraded to {{version}}.

   ```
   calico-felix --version
   ```

   It should return `{{version}}`.

1. On all compute nodes, add the following line to the end of `/etc/calico/felix.cfg`:

   ```
   DatastoreType = etcdv3
   ```

   If you need to change the EtcdEndpoints address (e.g. because you've installed a new etcdv3 cluster rather than upgrading your existing etcdv2 cluster), you should update the EtcdEndpoints addresses in `/etc/calico/felix.cfg` at this point.

1. On all control nodes, update packages:

   ```
   sudo yum update
   ```

   We recommend upgrading the whole distribution as shown here. In case you prefer to upgrade particular packages only, those needed for a {{prodname}} control node are the following.

   - `calico-common`
   - `calico-control`
   - `networking-calico`
   - `openstack-neutron`
1. On all control nodes, restart `neutron-server`:

   ```
   sudo systemctl restart neutron-server
   ```

1. If you ran `calico-upgrade` earlier to migrate non-OpenStack data, run the following on the control node:

   ```
   calico-upgrade complete
   ```

1. Remove any existing `calicoctl` instances and [install the new `calicoctl`](../calicoctl/install.mdx).

1. Congratulations! You have upgraded to {{prodname}} {{version}}.

## Upgrading an OpenStack cluster based on Ubuntu

1. On all nodes, change the location of the {{prodname}} packages to point to the {{version}} repo:

   ```
   sudo bash -c 'cat > /etc/apt/sources.list.d/project-calico-calico-X_X-trusty.list' << EOF
   deb http://ppa.launchpad.net/project-calico/calico-X.X/ubuntu trusty main
   # deb-src http://ppa.launchpad.net/project-calico/calico-X.X/ubuntu trusty main
   EOF
   ```

   Replace `X_X` and `X.X` with the version you're upgrading to. Example: if you're upgrading to v3.5, replace `X_X` with `3_5` and replace `X.X` with `3.5`. Also replace `trusty` with the code name of your Ubuntu version.

1. On all compute nodes, update packages:

   ```
   sudo apt-get update
   sudo apt-get install calico-compute calico-felix calico-common \
     python-etcd networking-calico calico-dhcp-agent
   ```

1. Use the following command on the compute nodes to confirm that Felix has upgraded to {{version}}.

   ```
   calico-felix --version
   ```

   It should return `{{version}}`.

1. On all compute nodes, add the following line to the end of `/etc/calico/felix.cfg`:

   ```
   DatastoreType = etcdv3
   ```

   If you need to change the EtcdEndpoints address (e.g. because you've installed a new etcdv3 cluster rather than upgrading your existing etcdv2 cluster), you should update the EtcdEndpoints addresses in `/etc/calico/felix.cfg` at this point.

1. On all control nodes, update packages:

   ```
   sudo apt-get update
   sudo apt-get install calico-control calico-common python-etcd networking-calico
   ```

1. On all control nodes, restart `neutron-server`:

   ```
   sudo service neutron-server restart
   ```

1. If you ran `calico-upgrade` earlier to migrate non-OpenStack data, run the following on the control node:

   ```
   calico-upgrade complete
   ```

1. Remove any existing `calicoctl` instances and [install the new `calicoctl`](../calicoctl/install.mdx).

1. Congratulations! You have upgraded to {{prodname}} {{version}}.
diff --git a/calico_versioned_docs/version-3.25/reference/api.mdx b/calico_versioned_docs/version-3.25/reference/api.mdx
deleted file mode 100644
index 43939af0ce..0000000000
--- a/calico_versioned_docs/version-3.25/reference/api.mdx
+++ /dev/null
@@ -1,12 +0,0 @@
---
description: Learn about the Calico API and how to use it.
---

# Calico API

{{prodname}} provides and consumes a public API in Go that allows developers to work with {{prodname}} resources.

To learn more about the {{prodname}} API and how to use it, see the Calico API project [README](https://github.com/projectcalico/api/blob/master/README.md) or the [github.com/projectcalico/api Go module page](https://pkg.go.dev/github.com/projectcalico/api).
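If you want to experiment with the module, a hypothetical starting point (the project path here is purely illustrative) is simply:

```bash
# Create a throwaway Go module and pull in the Calico API types
go mod init example.com/calico-api-demo
go get github.com/projectcalico/api@latest
```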
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx b/calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx
deleted file mode 100644
index c66f5b0f5b..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/data-path.mdx
+++ /dev/null
@@ -1,63 +0,0 @@
---
description: Learn how packets flow between workloads in a datacenter, or between a workload and the internet.
---

# The Calico data path: IP routing and iptables

One of {{prodname}}'s key features is how packets flow between workloads in a data center, or between a workload and the Internet, without additional encapsulation.

In the {{prodname}} approach, IP packets to or from a workload are routed and firewalled by the Linux routing table and iptables or eBPF infrastructure on the workload's host. For a workload that is sending packets, {{prodname}} ensures that the host is always returned as the next hop MAC address regardless of whatever routing the workload itself might configure. For packets addressed to a workload, the last IP hop is that from the destination workload's host to the workload itself.

![Calico datapath](/img/calico/calico-datapath.png)

Suppose that IPv4 addresses for the workloads are allocated from a datacenter-private subnet of 10.65/16, and that the hosts have IP addresses from 172.18.203/24. If you look at the routing table on a host:

```bash
route -n
```

You will see something like this:

```
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         172.18.203.1    0.0.0.0         UG    0      0        0 eth0
10.65.0.0       0.0.0.0         255.255.0.0     U     0      0        0 ns-db03ab89-b4
10.65.0.21      172.18.203.126  255.255.255.255 UGH   0      0        0 eth0
10.65.0.22      172.18.203.129  255.255.255.255 UGH   0      0        0 eth0
10.65.0.23      172.18.203.129  255.255.255.255 UGH   0      0        0 eth0
10.65.0.24      0.0.0.0         255.255.255.255 UH    0      0        0 tapa429fb36-04
172.18.203.0    0.0.0.0         255.255.255.0   U     0      0        0 eth0
```

There is one workload on this host with IP address 10.65.0.24, accessible from the host via a TAP (or veth, etc.) interface named tapa429fb36-04. Hence there is a direct route for 10.65.0.24, through tapa429fb36-04. Other workloads, with the .21, .22 and .23 addresses, are hosted on two other hosts (172.18.203.126 and .129), so the routes for those workload addresses are via those hosts.

The direct routes are set up by a {{prodname}} agent named Felix when it is asked to provision connectivity for a particular workload. A BGP client (such as BIRD) then notices those and distributes them, perhaps via a route reflector, to BGP clients running on other hosts, and hence the indirect routes appear also.

## Is that all?

As far as the static data path is concerned, yes. It's just a combination of responding to workload ARP requests with the host MAC, IP routing, and iptables or eBPF. There's a great deal more to {{prodname}} in terms of how the required routing and security information is managed, and for handling dynamic things such as workload migration, but the basic data path really is that simple.
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx b/calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx
deleted file mode 100644
index 717ac9f295..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/design/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
---
description: Deep dive into using Calico over Ethernet and IP fabrics.
hide_table_of_contents: true
---

# Network design

import DocCardList from '@theme/DocCardList';
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';

<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx b/calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx
deleted file mode 100644
index 422372357d..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/design/l2-interconnect-fabric.mdx
+++ /dev/null
@@ -1,117 +0,0 @@
---
description: Understand the interconnect fabric options in a Calico network.
---

# Calico over Ethernet fabrics

Any technology that is capable of transporting IP packets can be used as the interconnect fabric in a {{prodname}} network. This means that the standard tools used to transport IP, such as MPLS and Ethernet, can be used in a {{prodname}} network.

The focus of this article is on Ethernet as the interconnect network. Most at-scale cloud operators have converted to IP fabrics, and that infrastructure will work for {{prodname}} as well. However, the concerns that drove most of those operators to IP as the interconnection network in their pods are largely ameliorated by {{prodname}}, allowing Ethernet to be viably considered as a {{prodname}} interconnect, even in large-scale deployments.

## Concerns over Ethernet at scale

It has been acknowledged by the industry for years that, beyond a certain size, classical Ethernet networks are unsuitable for production deployment. Although there have been [multiple](https://en.wikipedia.org/wiki/Provider_Backbone_Bridge_Traffic_Engineering) [attempts](https://web.archive.org/web/20150923231827/https://www.cisco.com/web/about/ac123/ac147/archived_issues/ipj_14-3/143_trill.html) [to address](https://en.wikipedia.org/wiki/Virtual_Private_LAN_Service) these issues, the scale-out networking community has largely abandoned Ethernet for anything other than providing physical point-to-point links in the networking fabric. The principal reasons for Ethernet failures at large scale are:

- Large numbers of _endpoints_ ([note 1](#note-1))

  Each switch in an Ethernet network must learn the path to all Ethernet endpoints that are connected to the Ethernet network. Learning this amount of state can become a substantial task when we are talking about hundreds of thousands of _endpoints_.

- High rate of _churn_, or change in the network

  With that many endpoints, most of them ephemeral (such as virtual machines or containers), there is a large amount of _churn_ in the network. That load of re-learning paths can be a substantial burden on the control plane processor of most Ethernet switches.

- High volumes of broadcast traffic

  As each node on the Ethernet network must use broadcast packets to locate peers, and many use broadcast for other purposes, the resultant packet replication to each and every endpoint can lead to _broadcast storms_ in large Ethernet networks, effectively consuming most, if not all, resources in the network and the attached endpoints.

- Spanning tree

  Spanning tree is the protocol used to keep an Ethernet network from forming loops. The protocol was designed in the era of smaller, simpler networks, and it has not aged well. As the number of links and interconnects in an Ethernet network goes up, many implementations of spanning tree become more _fragile_.
  Unfortunately, when spanning tree fails in an Ethernet network, the effect is a catastrophic loop or partition (or both) in the network that is, in most cases, difficult to troubleshoot or resolve.

Although many of these issues are crippling at _VM scale_ (tens of thousands of endpoints that live for hours, days, or weeks), they are absolutely lethal at _container scale_ (hundreds of thousands of endpoints that live for seconds, minutes, or days).

If you weren't ready to turn off your Ethernet data center network before this, I bet you are now. Before you do, however, let's look at how {{prodname}} can mitigate these issues, even in very large deployments.

## How does {{prodname}} tame the Ethernet daemons?

First, let's look at how {{prodname}} uses an Ethernet interconnect fabric. It's important to remember that an Ethernet network _sees_ nothing on the other side of an attached IP router; the Ethernet network just _sees_ the router itself. This is why Ethernet switches can be used at Internet peering points, where large fractions of Internet traffic are exchanged. The switches only see the routers from the various ISPs, not those ISPs' customers' nodes. We leverage the same effect in {{prodname}}.

With the issues outlined above in mind, let's revisit them in a {{prodname}} context.

- Large numbers of endpoints

  In a {{prodname}} network, the Ethernet interconnect fabric only sees the routers/compute servers, not the endpoints. In a standard cloud model, where there are tens of VMs (or hundreds of containers) per server, this reduces the number of nodes that the Ethernet network sees (and has to learn) by one to two orders of magnitude. Even in very large pods (say twenty thousand servers), the Ethernet network would still only see a few tens of thousands of endpoints, well within the scale of any competent data center Ethernet top of rack (ToR) switch.

- High rate of churn

  In a classical Ethernet data center fabric, there is a _churn_ event each time an endpoint is created, destroyed, or moved. In a large data center, with hundreds of thousands of endpoints, this _churn_ could run into tens of events per second, every second of the day, with peaks easily in the hundreds or thousands of events per second. In a {{prodname}} network, however, the _churn_ is very low: the only event that would lead to _churn_ in the fabric is the addition or removal of a compute server. Even assuming a rate of server additions and removals that is orders of magnitude more than what is normally experienced, there would only be two thousand events per **day**. Any switch that cannot handle that volume of change in the network should not be used for any application.

- High volume of broadcast traffic

  Because the first (and last) hop for any traffic in a {{prodname}} network is an IP hop, and IP hops terminate broadcast traffic, there is no endpoint broadcast traffic in the Ethernet fabric, period. In fact, the only broadcast traffic that should be seen in the Ethernet fabric is the ARPs of the compute servers locating each other. If the traffic pattern is fairly consistent, the steady-state ARP rate should be almost zero. Even in a pathological case, the ARP rate should be well within normal accepted boundaries.

- Spanning tree

  Depending on the architecture chosen for the Ethernet fabric, it may even be possible to turn off spanning tree. However, even if it is left on, due to the reduction in node count and churn, most competent spanning tree implementations should be able to handle the load without stress.
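You can observe this effect directly on a {{prodname}} compute server. A quick sketch, assuming the fabric-facing interface is `eth0` and the default `cali*` workload interface prefix is in use:

```bash
# L2 state the fabric has to learn: only the other hosts on the segment
ip -4 neigh show dev eth0 | wc -l

# Workload reachability is plain L3 state (local cali* veth routes plus routes
# learned over BGP), which the Ethernet fabric never sees
ip route | grep -c -e 'cali' -e 'proto bird'
```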
With these considerations in mind, it should be evident that an Ethernet interconnect fabric in {{prodname}} is not only possible, it is practical and should be seriously considered as the interconnect fabric for a {{prodname}} network.

As mentioned in the IP fabric article, an IP fabric is also quite feasible for {{prodname}}, but there are more considerations that must be taken into account. The Ethernet fabric option has fewer architectural considerations in its design.

## A brief note about Ethernet topology

As mentioned elsewhere in the {{prodname}} documentation, because {{prodname}} can use most of the standard IP tooling, some interesting options regarding fabric topology become possible.

We assume that an Ethernet fabric for {{prodname}} would most likely be constructed as a _leaf/spine_ architecture. Other options are possible, but the _leaf/spine_ is the predominant architectural model in use in scale-out infrastructure today.

Because {{prodname}} is an IP routed fabric, a {{prodname}} network can use [ECMP](https://en.wikipedia.org/wiki/Equal-cost_multi-path_routing) to distribute traffic across multiple links (instead of using Ethernet techniques such as MLAG). By leveraging ECMP load balancing on the {{prodname}} compute servers, it is possible to build the fabric out of multiple _independent_ leaf/spine planes using no technologies other than IP routing in the {{prodname}} nodes and basic Ethernet switching in the interconnect fabric. These planes would operate completely independently and could be designed such that they do not share a fault domain. This would allow for the catastrophic failure of one (or more) plane(s) of the Ethernet interconnect fabric without the loss of the pod (the failure would just decrease the amount of interconnect bandwidth in the pod). This is a gentler failure mode than the pod-wide IP or Ethernet failure that is possible with today's designs.

You might find this [Facebook blog post](https://code.facebook.com/posts/360346274145943/introducing-data-center-fabric-the-next-generation-facebook-data-center-network/) on their fabric approach interesting. A graphic to visualize the idea is shown below.

![Ethernet spine planes](/img/calico/l2-spine-planes.png)

The diagram does not show the endpoints, which would be unaware of anything in the fabric (as noted above).

In this diagram, each ToR is segmented into four logical switches (possibly by using 'port VLANs') ([note 2](#note-2)), and each compute server has a connection to each of those logical switches. We will identify those logical switches by their color: each ToR would have a blue, green, orange, and red logical switch. Those 'colors' would be members of a given _plane_, so there would be a blue plane, a green plane, an orange plane, and a red plane. Each plane would have a dedicated spine switch, and each ToR in a given plane would be connected to its spine, and only its spine.

Each plane would constitute an IP network, so the blue plane would be 2001:db8:1000::/36, the green would be 2001:db8:2000::/36, and the orange and red planes would be 2001:db8:3000::/36 and 2001:db8:4000::/36 respectively ([note 3](#note-3)).

Each IP network (plane) requires its own BGP route reflectors. Those route reflectors need to be peered with each other within the plane, but the route reflectors in each plane do not need to be peered with one another. Therefore, a fabric of four planes would have four route reflector meshes.
Each compute server, border router, _etc._ would need to be a route reflector client of at least one route reflector in each plane, and preferably two or more in each plane.

The following diagram visualizes the route reflector environment.

![route-reflector](/img/calico/l2-rr-spine-planes.png)

These route reflectors could be dedicated hardware connected to the spine switches (or the spine switches themselves), or physical or virtual route reflectors connected to the necessary logical leaf switches (blue, green, orange, and red). The latter might be a route reflector running on a compute server and connected directly to the correct plane link, not routed through the vRouter, to avoid the chicken-and-egg problem that would occur if the route reflector were "behind" the {{prodname}} network.

Other physical and logical configurations and counts are, of course, possible; this is just an example.

The logical configuration would then give each compute server an address on each plane's subnet, with its endpoints announced on each subnet. If ECMP is then turned on, the compute servers would distribute the load across all planes.

If a plane were to fail (say, due to a spanning tree failure), then only that one plane would fail. The remaining planes would stay running.

### Footnotes

### Note 1

In this document (and in all {{prodname}} documents) we tend to use the term _endpoint_ to refer to a virtual machine, container, appliance, bare metal server, or any other entity that is connected to a {{prodname}} network. If we are referring to a specific type of endpoint, we will call that out (such as referring to the behavior of VMs as distinct from containers).

### Note 2

We are using logical switches in this example. Physical ToRs could also be used, or a mix of the two (say, two logical switches hosted on each physical switch).

### Note 3

We use IPv6 here purely as an example. IPv4 would be configured similarly.
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx b/calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx
deleted file mode 100644
index ee58dbba6a..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/design/l3-interconnect-fabric.mdx
+++ /dev/null
@@ -1,282 +0,0 @@
---
description: Understand considerations for implementing interconnect fabrics with Calico.
---

# Calico over IP fabrics

{{prodname}} provides an end-to-end IP network that interconnects the endpoints ([note 1](#note-1)) in a scale-out or cloud environment. To do that, it needs an _interconnect fabric_ to provide the physical networking layer on which {{prodname}} operates ([note 2](#note-2)).

Although {{prodname}} is designed to work with any underlying interconnect fabric that can support IP traffic, the fabric that has the fewest considerations attached to its implementation is an Ethernet fabric, as discussed in [Calico over Ethernet fabrics](l2-interconnect-fabric.mdx).

In most cases, the Ethernet fabric is the appropriate choice, but there are infrastructures where L3 (an IP fabric) has already been deployed, or will be deployed, and it makes sense for {{prodname}} to operate in those environments.

However, because {{prodname}} is, itself, a routed infrastructure, there are more engineering, architecture, and operations considerations that have to be weighed when running {{prodname}} with an IP routed interconnection fabric.
We will briefly outline those in the rest of this article. That said, {{prodname}} operates equally well with Ethernet or IP interconnect fabrics.

## Background

### Basic {{prodname}} architecture overview

A description of the {{prodname}} architecture can be found in our [architectural overview](../overview.mdx). However, a brief discussion of the routing and data paths is useful for the discussion.

In a {{prodname}} network, each compute server acts as a router for all of the endpoints that are hosted on that compute server. We call that function a vRouter. The data path is provided by the Linux kernel, the control plane by a BGP protocol server, and the management plane by {{prodname}}'s on-server agent, _Felix_.

Each endpoint can only communicate through its local vRouter, and the first and last _hop_ in any {{prodname}} packet flow is an IP router hop through a vRouter. Each vRouter announces all of the endpoints it is attached to, to all the other vRouters and other routers on the infrastructure fabric, using BGP, usually with BGP route reflectors to increase scale. A discussion of why we use BGP can be found in [Why BGP?](https://www.tigera.io/blog/why-bgp/).

Access control lists (ACLs) enforce security (and other) policy as directed by whatever cloud orchestrator is in use. There are other components in the {{prodname}} architecture, but they are irrelevant to the interconnect network fabric discussion.

### Overview of current common IP scale-out fabric architectures

There are two approaches to building an IP fabric for a scale-out infrastructure. However, all of them, to date, have assumed that the edge router in the infrastructure is the top of rack (ToR) switch. In the {{prodname}} model, that function is pushed to the compute server itself.

The two approaches are:

**Routing infrastructure is based on some form of IGP**

Due to the limitations in scale of IGP networks, the {{prodname}} team does not believe that using an IGP to distribute endpoint reachability information will adequately scale in a {{prodname}} environment. However, it is possible to use a combination of IGP and BGP in the interconnect fabric, where an IGP communicates the path to the _next-hop_ router (in {{prodname}}, this is often the destination compute server) and BGP is used to distribute the actual next-hop for a given endpoint. This is a valid model, and, in fact, is the most common approach in a widely distributed IP network (say, a carrier's backbone network). The design of these networks is somewhat complex, though, and will not be addressed further in this article ([note 3](#note-3)).

**Routing infrastructure is based entirely on BGP**

In this model, the IP network is "tight enough", or has a small enough diameter, that BGP can be used to distribute endpoint routes, and the paths to the next-hops for those routes are known to all of the routers in the network (in a {{prodname}} network, this includes the compute servers). This is the network model that this article addresses.

In this article, we cover the second option, because it is more common in the scale-out world.

### BGP-only interconnect fabrics

There are multiple methods to build a BGP-only interconnect fabric. We will focus on three models, each with two widely viable variations. There are other options, and we will briefly touch on why we didn't include some of them in [Other options](#other-options).
The first two models are:

- A BGP fabric where each of the ToR switches (and their subsidiary compute servers) is a unique [Autonomous System (AS)](https://en.wikipedia.org/wiki/Autonomous_system_%28Internet%29) and they are interconnected via either an Ethernet switching plane provided by the spine switches in a [leaf/spine](http://bradhedlund.com/2012/10/24/video-a-basic-introduction-to-the-leafspine-data-center-networking-fabric-design/) architecture, or via a set of spine switches, each of which is also a unique AS. We'll refer to this as the _AS per rack_ model. This model is detailed in [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938).

- A BGP fabric where each of the compute servers is a unique AS, and the ToR switches make up a transit AS. We'll refer to this as the _AS per server_ model.

Each of these models can have either an Ethernet or an IP spine. In the case of an Ethernet spine, each spine switch provides an isolated Ethernet connection _plane_, as in the {{prodname}} Ethernet interconnect fabric model, and each ToR switch is connected to each spine switch.

In the other variant, each spine switch is a unique AS, and each ToR switch BGP peers with each spine switch. In both cases, the ToR switches use ECMP to load-balance traffic between all available spine switches.

### BGP network design considerations

Contrary to popular opinion, BGP is actually a fairly simple protocol. For example, the BGP configuration on a {{prodname}} compute server is approximately sixty lines long, not counting comments. The perceived complexity is due to the things that you can _do_ with BGP. Many uses of BGP involve complex policy rules, where the behavior of BGP can be modified to meet technical (or business, financial, political, etc.) requirements. A default {{prodname}} network does not venture into those areas ([note 4](#note-4)), and is therefore fairly straightforward.

That said, there are a few design rules for BGP that need to be kept in mind when designing an IP fabric that will interconnect nodes in a {{prodname}} network. These BGP design requirements _can_ be worked around, if necessary, but doing so takes the designer out of the standard BGP _envelope_ and should only be done by an implementer who is _very_ comfortable with advanced BGP design. These considerations are listed below; the sketch after the list shows how some of them surface on a running node.

- AS continuity or _AS puddling_

  Any router in an AS _must_ be able to communicate with any other router in that same AS without transiting another AS.

- Next hop behavior

  By default, BGP routers do not change the _next hop_ of a route if they are peering with another router in their same AS. The inverse is also true: a BGP router will set itself as the _next hop_ of a route if it is peering with a router in another AS.

- Route reflection

  All BGP routers in a given AS must _peer_ with all the other routers in that AS. This is referred to as a _complete BGP mesh_. This can become problematic as the number of routers in the AS scales up. The use of _route reflectors_ reduces the need for the complete BGP mesh. However, route reflectors also have scaling considerations.

- Endpoints

  In a {{prodname}} network, each endpoint is a route. Hardware networking platforms are constrained by the number of routes they can learn. This is usually in the range of 10,000's or 100,000's of routes. Route aggregation can help, but that is usually dependent on the capabilities of the scheduler used by the orchestration software (_e.g._ OpenStack).
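To see how these constraints surface in practice, you can inspect the BGP state on a live node. A sketch, assuming `calicoctl` is installed on the node and the standard BIRD daemon is running with its control socket in the usual location:

```bash
# AS number and per-peer BGP session state for this node
sudo calicoctl node status

# Route count carried by this vRouter: one route per endpoint, as noted above
sudo birdcl -s /var/run/calico/bird.ctl show route count
```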
A deeper discussion of these considerations can be found in the [IP fabric design considerations](#ip-fabric-design-considerations).

The designs discussed below address these considerations.

### The AS Per Rack model

This model is the closest to the model suggested by [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938).

As mentioned earlier, there are two versions of this model: one with a set of Ethernet planes interconnecting the ToR switches, and the other where the core planes are also routers. The following diagrams may be useful for the discussion.

![](/img/calico/l3-fabric-diagrams-as-rack-l2-spine.png)

The diagram above shows the **AS per rack model** where the ToR switches are physically meshed via a set of Ethernet switching planes.

![](/img/calico/l3-fabric-diagrams-as-rack-l3-spine.png)

The diagram above shows the **AS per rack model** where the ToR switches are physically meshed via a set of discrete BGP spine routers, each in its own AS.

In this approach, every ToR-ToR or ToR-spine (in the case of an AS per spine) link is an eBGP peering, which means that no route reflection is possible (using standard BGP route reflectors) _north_ of the ToR switches.

If the L2 spine option is used, the result is that each ToR must peer with every other ToR switch in the cluster (which could be hundreds of peers).

If the AS per spine option is used, then each ToR only has to peer with each spine (there are usually somewhere between two and sixteen spine switches in a pod). However, the spine switches must peer with all ToR switches (again, that would be hundreds, but most spine switches have more control plane capacity than the average ToR, so this might be more scalable in many circumstances).

Within the rack, the configuration is the same for both variants, and is somewhat different than the configuration north of the ToR.

Every router within the rack, which, in the case of {{prodname}}, is every compute server, shares the same AS as the ToR that it is connected to. That connection is in the form of an Ethernet switching layer. Each router in the rack must be directly connected to enable the AS to remain contiguous. The ToR's _router_ function is then connected to that Ethernet switching layer as well. The actual configuration of this is dependent on the ToR in use, but usually it means that the ports that are connected to the compute servers are treated as _subnet_ or _segment_ ports, and the ToR's _router_ function then has a single interface into that subnet.

This configuration allows each compute server to connect to each other compute server in the rack without going through the ToR router, although it will, of course, go through the ToR switching function. The compute servers and the ToR router could all be directly meshed, or a route reflector could be used within the rack, either hosted on the ToR itself, or as a virtual function hosted on one or more compute servers within the rack.

The ToR, as the eBGP router, redistributes all of the routes from other ToRs as well as routes external to the data center to the compute servers that are in its AS, and announces all of the routes from within the AS (rack) to the other ToRs and the larger world. This means that each compute server will see the ToR as the next hop for all external routes, and the individual compute servers are the next hops for all routes internal to the rack.
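On the {{prodname}} side, the per-rack AS assignment and the ToR peering can be expressed with ordinary {{prodname}} resources. A minimal sketch (the AS number, peer address, node name, and rack label are all illustrative, and it assumes the default node-to-node BGP mesh has been disabled for a per-rack design):

```bash
# Give a node in rack 1 the rack's AS number
calicoctl patch node server1-rack1 -p '{"spec": {"bgp": {"asNumber": "64512"}}}'

# Peer every node labelled rack == 'rack1' with the rack's ToR
cat <<EOF | calicoctl apply -f -
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: rack1-tor
spec:
  peerIP: 192.0.2.1
  asNumber: 64512
  nodeSelector: rack == 'rack1'
EOF
```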
### The AS per Compute Server model

This model takes the concept of an AS per rack to its logical conclusion. In the earlier referenced [IETF RFC 7938](https://datatracker.ietf.org/doc/html/rfc7938), the assumption in the overall model is that the ToR is the first tier aggregating and routing element. In {{prodname}}, the ToR, if it is an L3 router, is actually the second tier. Remember, in {{prodname}}, the compute server is always the first/last router for an endpoint, and is also the first/last point of aggregation.

Therefore, if we follow the architecture of the draft, the compute server, not the ToR, should be the AS boundary. The differences can be seen in the following two diagrams.

![](/img/calico/l3-fabric-diagrams-as-server-l2-spine.png)

The diagram above shows the _AS per compute server model_ where the ToR switches are physically meshed via a set of Ethernet switching planes.

![](/img/calico/l3-fabric-diagrams-as-server-l3-spine.png)

The diagram above shows the _AS per compute server model_ where the ToR switches are physically connected to a set of independent routing planes.

As can be seen in these diagrams, there are still the same two variants as in the _AS per rack_ model: one where the spine switches provide a set of independent Ethernet planes to interconnect the ToR switches, and the other where that is done by a set of independent routers.

The real difference in this model is that the compute servers as well as the ToR switches are all independent autonomous systems. To make this work at scale, the use of four byte AS numbers, as discussed in [RFC 4893](http://www.faqs.org/rfcs/rfc4893.html), is required. Without four byte AS numbers, the total number of ToRs and compute servers in a {{prodname}} fabric would be limited to the approximately five thousand available private AS numbers ([note 5](#note-5)). If four byte AS numbers are used, there are approximately ninety-two million private AS numbers available. This should be sufficient for any given {{prodname}} fabric.

The other difference in this model _vs._ the AS per rack model is that there are no route reflectors used, as all BGP peerings are eBGP. In this case, each compute server in a given rack peers with its ToR switch, which is also acting as an eBGP router. For two servers within the same rack to communicate, they will be routed through the ToR. Therefore, each server will have one peering to each ToR it is connected to, and each ToR will have a peering with each compute server that it is connected to (normally, all the compute servers in the rack).

The inter-ToR connectivity considerations are the same in scale and scope as in the AS per rack model.

### The Downward Default model

The final model is a bit different. Whereas, in the previous models, all of the routers in the infrastructure carry full routing tables and leave their AS paths intact, this model ([note 6](#note-6)) removes the AS numbers at each stage of the routing path. This is done to prevent routes from other nodes in the network from being rejected as coming from the _local_ AS (since the source and destination of the route would share the same AS).

The following diagram shows the AS relationships in this model.

![](/img/calico/l3-fabric-downward-default.png)

In the diagram above, we show all {{prodname}} nodes sharing the same AS number, as do all ToR switches.
However, those ASs are different (_A1_ is not the same network as _A2_, even though they both share the same AS number _A_).

Although the use of a single AS for all ToR switches, and another for all compute servers, simplifies deployment (standardized configuration), the real benefit comes in the offloading of the routing tables in the ToR switches.

In this model, each router announces all of its routes to its upstream peer (the {{prodname}} routers to their ToR, the ToRs to the spine switches). However, in return, the upstream router only announces a default route. In this case, a given {{prodname}} router only has routes for the endpoints that are locally hosted on it, plus the default from the ToR. Because the ToR is the {{prodname}} node's only path to the rest of the network, this matches reality. The same happens between the ToR switches and the spine. This means that the ToR only has to install the routes that are for endpoints hosted on its downstream {{prodname}} nodes. Even if we were to host 200 endpoints per {{prodname}} node, and put 80 {{prodname}} nodes in each rack, that would still limit the routing table on the ToR to a maximum of 16,000 entries (well within the capabilities of even the most modest of switches).

Because the default route is originated by the spine, there is no chance for a downward announced route to originate from the recipient's AS, preventing the **AS puddling** problem.

There is one (minor) drawback to this model: all traffic destined for an invalid destination (a destination IP that does not exist) will be forwarded to the spine switches before being dropped.

It should also be noted that the spine switches do need to carry all of the {{prodname}} network routes, just as they do in the routed spines in the previous examples. In short, this model imposes no more load on the spines than they would already have, and substantially reduces the amount of routing table space used on the ToR switches. It also reduces the number of routes in the {{prodname}} nodes, but, as we have discussed before, that is not a concern in most deployments, as the amount of memory consumed by a full routing table in {{prodname}} is a fraction of the total memory available on a modern compute server.

## Recommendation

The {{prodname}} team recommends the use of the [AS per rack](#the-as-per-rack-model) model if the resultant routing table size can be accommodated by the ToR and spine switches, remembering to account for projected growth.

If there is concern about the route table size in the ToR switches, the {{prodname}} team recommends the [Downward Default](#the-downward-default-model) model.

If there are concerns about both the spine and ToR switch route table capacity, or there is a desire to run a very simple L2 fabric to connect the {{prodname}} nodes, then you should consider the Ethernet fabric as detailed in [Calico over Ethernet fabrics](l2-interconnect-fabric.mdx).

If you are interested in the AS per compute server model, the {{prodname}} team would be very interested in discussing the deployment of that model.

## Other options

The way the physical and logical connectivity is laid out in this article, and in the [Ethernet fabric](l2-interconnect-fabric.mdx) article, the next hop router for a given route is always directly connected to the router receiving that route. This removes the need for another protocol to distribute the next hop routes.
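You can verify this directness on any {{prodname}} node: every endpoint route learned over BGP should resolve to a neighbor on a local subnet, so no IGP recursion is involved. Using the addressing from the data path example earlier (output abridged and illustrative):

```bash
# Endpoint routes point at a directly connected next hop (proto bird = learned via BGP)
ip route show 10.65.0.21
# 10.65.0.21 via 172.18.203.126 dev eth0 proto bird
```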
However, in many (or most) WAN BGP networks, the routers within a given AS may not be directly adjacent. Therefore, a router may receive a route with a next hop address that it is not directly adjacent to. In those cases, an IGP, such as OSPF or IS-IS, is used by the routers within a given AS to determine the path to the BGP next hop route.

There may be {{prodname}} architectures with similar models, where the routers within a given AS are not directly adjacent. In those models, the use of an IGP in {{prodname}} may be warranted. The configuration of those protocols is, however, beyond the scope of this technical note.

### IP fabric design considerations

**AS puddling**

The first consideration is that an AS must be kept contiguous. This means that any two nodes in a given AS must be able to communicate without traversing any other AS. If this rule is not observed, the effect is often referred to as _AS puddling_, and the network will _not_ function correctly.

A corollary of that rule is that any two administrative regions that share the same AS number are in the same AS, even if that was not the designer's intent. BGP has no way of identifying whether an AS is local or foreign other than by the AS number. Therefore, re-use of an AS number for two _networks_ that are not directly connected, but only connected through another _network_ or AS number, will not work without a lot of policy changes to the BGP routers.

Another corollary of that rule is that a BGP router will not propagate a route to a peer if the route has an AS in its path that is the same AS as the peer. This prevents loops from forming in the network. The effect of this is to prevent two routers in the same AS from transiting another router (whether in that AS or not).

**Next hop behavior**

Another consideration is based on the differences between iBGP and eBGP. BGP operates in two modes: if two routers are BGP peers but share the same AS number, they are considered to be in an _internal_ BGP (or iBGP) peering relationship. If they are members of different ASs, then they are in an _external_ or eBGP relationship.

BGP's original design model was that all BGP routers within a given AS would know how to get to one another (via static routes, IGP ([note 7](#note-7)) routing protocols, or the like), and that routers in different ASs would not know how to reach one another unless they were directly connected.

Based on that design point, routers in an iBGP peering relationship assume that they do not transit traffic for other iBGP routers in a given AS (i.e. A can communicate with C, and therefore will not need to route through B), and therefore do not change the _next hop_ attribute in BGP ([note 8](#note-8)).

A router with an eBGP peering, on the other hand, assumes that its eBGP peer will not know how to reach the next hop route, and therefore substitutes its own address in the next hop field. This is often referred to as _next hop self_.

In the {{prodname}} [Ethernet fabric](l2-interconnect-fabric.mdx) model, all of the compute servers (the routers in a {{prodname}} network) are directly connected over one or more Ethernet network(s) and are therefore directly reachable. In this case, a router in the {{prodname}} network does not need to set _next hop self_ within the {{prodname}} fabric.

The models we present in this article ensure that all routes that may traverse a non-{{prodname}} router are eBGP routes, and therefore _next hop self_ is automatically set correctly.
If a deployment of {{prodname}} in an IP interconnect fabric does not satisfy that constraint, then _next hop self_ must be appropriately configured.

**Route reflection**

As mentioned above, BGP expects that all of the iBGP routers in a network can see (and speak) directly to one another; this is referred to as a _BGP full mesh_. In small networks this is not a problem, but it does become interesting as the number of routers increases. For example, if you have 99 BGP routers in an AS and wish to add one more, you would have to configure the peering to that new router on each of the 99 existing routers. Not only is this a problem at configuration time, it means that each router is maintaining 100 protocol adjacencies, which can start to be a drain on constrained resources in a router. While this might be _interesting_ at 100 routers, it becomes an impossible task with 1000's or 10,000's of routers (the potential size of a {{prodname}} network).

Conveniently, large scale/Internet scale networks solved this problem almost 20 years ago by deploying BGP route reflection as described in [RFC 1966](http://www.faqs.org/rfcs/rfc1966.html). This is a technique supported by almost all BGP routers today. In a large network, a number of route reflectors ([note 9](#note-9)) are evenly distributed, and each iBGP router is _peered_ with one or more route reflectors (usually 2 or 3). Each route reflector can handle 10's or 100's of route reflector clients (in {{prodname}}'s case, the compute servers), depending on the route reflector being used. Those route reflectors are, in turn, peered with each other. This means that there are an order of magnitude fewer route reflectors that need to be completely meshed, and each route reflector client is only configured to peer with 2 or 3 route reflectors. This is much easier to manage.

Other route reflector architectures are possible, but those are beyond the scope of this document.

**Endpoints**

The final consideration is the number of endpoints in a {{prodname}} network. In the [Ethernet fabric](l2-interconnect-fabric.mdx) case, the number of endpoints is not constrained by the interconnect fabric, as the interconnect fabric does not _see_ the actual endpoints; it only _sees_ the vRouters, or compute servers. This is not the case in an IP fabric, however. IP networks forward by using the destination IP address in the packet, which, in {{prodname}}'s case, is the destination endpoint. That means that the IP fabric nodes (ToR switches and/or spine switches, for example) must know the routes to each endpoint in the network. They learn this by participating as route reflector clients in the BGP mesh, just as the {{prodname}} vRouter/compute server does.

However, unlike a compute server, which has a relatively unconstrained amount of memory, a physical switch is either memory constrained or quite expensive. This means that the physical switch has a limit on how many _routes_ it can handle. The current industry standard for modern commodity switches is in the range of 128,000 routes. This means that, without other routing _tricks_, such as aggregation, a {{prodname}} installation that uses an IP fabric will be limited to the routing table size of its constituent network hardware, with a reasonable upper limit today of 128,000 endpoints.

### Footnotes

### Note 1

In {{prodname}}'s terminology, an endpoint is an IP address and interface.
-
-### Footnotes
-
-### Note 1
-
-In {{prodname}}'s terminology, an endpoint is an IP address and interface. It could refer to a VM, a container, or even a process bound to an IP address running on a bare metal server.
-
-### Note 2
-
-This interconnect fabric provides the connectivity between the {{prodname}} (v)Router (in almost all cases, the compute servers) nodes, as well as any other elements in the fabric (_e.g._ bare metal servers, border routers, and appliances).
-
-### Note 3
-
-If there is interest in a discussion of this approach, please let us know. The {{prodname}} team could either arrange a discussion, or, if there is enough interest, publish a follow-up tech note.
-
-### Note 4
-
-However, those tools are available if a given {{prodname}} instance needs to utilize those policy constructs.
-
-### Note 5
-
-The two-byte AS space reserves approximately the last five thousand AS numbers for private use. There is no technical reason why other AS numbers could not be used. However, the re-use of global-scope AS numbers within a private infrastructure is strongly discouraged. The chance for routing system failure or incorrect routing is substantial, and not restricted to the entity that is doing the reuse.
-
-### Note 6
-
-We first saw this design in a customer's lab, and thought it innovative enough to share (we asked them first, of course). Similar **AS Path Stripping** approaches are used in ISP networks, however.
-
-### Note 7
-
-An Interior Gateway Protocol is a local routing protocol that does not cross an AS boundary. The primary IGPs in use today are OSPF and IS-IS. While complex iBGP networks still use IGP routing protocols, a data center is normally a fairly simple network, even if it has many routers in it. Therefore, in the data center case, the use of an IGP can often be dispensed with.
-
-### Note 8
-
-A next hop is an attribute of a route announced by a routing protocol. In simple terms, a route is defined by a _target_, or the destination that is to be reached, and a _next hop_, which is the next router in the path to reach that target. There are many other characteristics in a route, but those are well beyond the scope of this post.
-
-### Note 9
-
-A route reflector may be a physical router, a software appliance, or simply a BGP daemon. It only processes routing messages, and does not pass actual data plane traffic. However, some route reflectors are co-resident on regular routers that do pass data plane traffic. Although they may sit on one platform, the functions are distinct.
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/index.mdx b/calico_versioned_docs/version-3.25/reference/architecture/index.mdx
deleted file mode 100644
index 7d3a0a9862..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Understand Calico components, network design, and the data path between workloads.
-hide_table_of_contents: true
----
-
-# Architecture
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/architecture/overview.mdx b/calico_versioned_docs/version-3.25/reference/architecture/overview.mdx
deleted file mode 100644
index 1e92d34122..0000000000
--- a/calico_versioned_docs/version-3.25/reference/architecture/overview.mdx
+++ /dev/null
@@ -1,155 +0,0 @@
----
-description: Learn the basic Calico components.
----
-
-# Component architecture
-
-## {{prodname}} components
-
-The following diagram shows the required and optional {{prodname}} components for an on-premises Kubernetes deployment with networking and network policy.
-
-![calico-components](/img/calico/architecture-calico.svg)
-
-**{{prodname}} components**
-
-- [Calico API server](#calico-api-server)
-- [Felix](#felix)
-- [BIRD](#bird)
-- [confd](#confd)
-- [Dikastes](#dikastes)
-- [CNI plugin](#cni-plugin)
-- [Datastore plugin](#datastore-plugin)
-- [IPAM plugin](#ipam-plugin)
-- [kube-controllers](#kube-controllers)
-- [Typha](#typha)
-- [calicoctl](#calicoctl)
-
-**Cloud orchestrator plugins**
-
-- [Plugins for cloud orchestrators](#plugins-for-cloud-orchestrators)
-
-## Calico API server
-
-**Main task**: Lets you manage {{prodname}} resources directly with `kubectl`.
-
-## Felix
-
-**Main task**: Programs routes and ACLs, and anything else required on the host to provide desired connectivity for the endpoints on that host. Runs on each machine that hosts endpoints. Runs as an agent daemon. [Felix resource](../resources/felixconfig.mdx).
-
-Depending on the specific orchestrator environment, Felix is responsible for:
-
-- **Interface management**
-
-  Programs information about interfaces into the kernel so the kernel can correctly handle the traffic from that endpoint. In particular, it ensures that the host responds to ARP requests from each workload with the MAC of the host, and enables IP forwarding for interfaces that it manages. It also monitors interfaces to ensure that the programming is applied at the appropriate time.
-
-- **Route programming**
-
-  Programs routes to the endpoints on its host into the Linux kernel FIB (Forwarding Information Base). This ensures that packets destined for those endpoints that arrive at the host are forwarded accordingly.
-
-- **ACL programming**
-
-  Programs ACLs into the Linux kernel to ensure that only valid traffic can be sent between endpoints, and that endpoints cannot circumvent {{prodname}} security measures.
-
-- **State reporting**
-
-  Provides network health data. In particular, it reports errors and problems when configuring its host. This data is written to the datastore so it is visible to other components and operators of the network.
-
-:::note
-
-`{{nodecontainer}}` can be run in _policy only mode_ where Felix runs without BIRD and confd. This provides policy management without route distribution between hosts, and is used for deployments like managed cloud providers. You enable this mode by setting the environment variable `CALICO_NETWORKING_BACKEND=none` before starting the node.
-
-:::
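-
-As a sketch of where that variable lives, the fragment below shows the relevant part of a typical `calico-node` DaemonSet spec; all surrounding fields are omitted and the container name assumes a standard manifest install:
-
-```yaml
-# Fragment of a calico-node DaemonSet spec (illustrative only).
-containers:
-  - name: calico-node
-    env:
-      # Policy-only mode: Felix runs without BIRD and confd.
-      - name: CALICO_NETWORKING_BACKEND
-        value: 'none'
-```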
-
-## BIRD
-
-**Main task**: Gets routes from Felix and distributes them to BGP peers on the network for inter-host routing. Runs on each node that hosts a Felix agent. Open source, internet routing daemon. [BIRD](../configure-calico-node.mdx#content-main).
-
-The BGP client is responsible for:
-
-- **Route distribution**
-
-  When Felix inserts routes into the Linux kernel FIB, the BGP client distributes them to other nodes in the deployment. This ensures efficient traffic routing for the deployment.
-
-- **BGP route reflector configuration**
-
-  BGP route reflectors are often configured for large deployments rather than a standard BGP client. BGP route reflectors act as a central point for connecting BGP clients. (Standard BGP requires that every BGP client be connected to every other BGP client in a mesh topology, which is difficult to maintain.)
-
-  For redundancy, you can seamlessly deploy multiple BGP route reflectors. BGP route reflectors are involved only in control of the network: no endpoint data passes through them. When the {{prodname}} BGP client advertises routes from its FIB to the route reflector, the route reflector advertises those routes out to the other nodes in the deployment.
-
-## confd
-
-**Main task**: Monitors the {{prodname}} datastore for changes to BGP configuration and global defaults such as AS number, logging levels, and IPAM information. Open source, lightweight configuration management tool.
-
-Confd dynamically generates BIRD configuration files based on the updates to data in the datastore. When the configuration file changes, confd triggers BIRD to load the new files. [Configure confd](../configure-calico-node.mdx#content-main), and [confd project](https://github.com/kelseyhightower/confd).
-
-## Dikastes
-
-**Main task**: Enforces network policy for Istio service mesh. Runs on a cluster as a sidecar proxy to Istio Envoy.
-
-(Optional) {{prodname}} enforces network policy for workloads at both the Linux kernel (using iptables, L3-L4), and at L3-L7 using an Envoy sidecar proxy called Dikastes, with cryptographic authentication of requests. Using multiple enforcement points establishes the identity of the remote endpoint based on multiple criteria. The host Linux kernel enforcement protects your workloads even if the workload pod is compromised, and the Envoy proxy is bypassed.
-
-:::note
-
-Dikastes can be terminated by issuing an HTTP POST request to `/terminate` on the socket address specified using the environment variables `DIKASTES_HTTP_BIND_ADDR` and `DIKASTES_HTTP_BIND_PORT`. This allows for graceful termination so that Kubernetes Jobs can complete successfully, and is analogous to Envoy's `/quitquitquit`. For example: `curl -XPOST http://127.0.0.1:7777/terminate`
-
-:::
-
-## CNI plugin
-
-**Main task**: Provides {{prodname}} networking for Kubernetes clusters.
-
-The {{prodname}} binary that presents this API to Kubernetes is called the CNI plugin, and must be installed on every node in the Kubernetes cluster. The {{prodname}} CNI plugin allows you to use {{prodname}} networking for any orchestrator that makes use of the CNI networking specification. Configured through the standard [CNI configuration mechanism](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration), and [{{prodname}} CNI plugin](../configure-cni-plugins.mdx).
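-
-For orientation, an abridged {{prodname}} CNI network configuration is sketched below; real installations generate this file, and the values and kubeconfig path here are illustrative:
-
-```json
-{
-  "name": "k8s-pod-network",
-  "cniVersion": "0.3.1",
-  "plugins": [
-    {
-      "type": "calico",
-      "datastore_type": "kubernetes",
-      "ipam": { "type": "calico-ipam" },
-      "policy": { "type": "k8s" },
-      "kubernetes": { "kubeconfig": "/etc/cni/net.d/calico-kubeconfig" }
-    }
-  ]
-}
-```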
-
-## Datastore plugin
-
-**Main task**: Increases scale by reducing each node's impact on the datastore. It is one of the {{prodname}} [CNI plugins](../configure-cni-plugins.mdx).
-
-- **Kubernetes API datastore (kdd)**
-
-  The advantages of using the Kubernetes API datastore (kdd) with {{prodname}} are:
-
-  - Simpler to manage because it does not require an extra datastore
-  - Use Kubernetes RBAC to control access to {{prodname}} resources
-  - Use Kubernetes audit logging to generate audit logs of changes to {{prodname}} resources
-
-- **etcd**
-
-  `etcd` is a consistent, highly available distributed key-value store that provides data storage for the {{prodname}} network, and for communications between components. `etcd` is supported for protecting only non-cluster hosts (as of {{prodname}} v3.1). For completeness, `etcd` advantages are:
-
-  - Lets you run {{prodname}} on non-Kubernetes platforms
-  - Separation of concerns between Kubernetes and {{prodname}} resources, for example allowing you to scale the datastores independently
-  - Lets you run a {{prodname}} cluster that contains more than just a single Kubernetes cluster, for example, bare metal servers with {{prodname}} host protection interworking with a Kubernetes cluster; or multiple Kubernetes clusters.
-
-  [etcd admin guide](https://coreos.com/etcd/docs/latest/admin_guide.html#optimal-cluster-size)
-
-## IPAM plugin
-
-**Main task**: Uses {{prodname}}'s IP pool resource to control how IP addresses are allocated to pods within the cluster. It is the default plugin used by most {{prodname}} installations. It is one of the {{prodname}} [CNI plugins](../configure-cni-plugins.mdx).
-
-## kube-controllers
-
-**Main task**: Monitors the Kubernetes API and performs actions based on cluster state. [kube-controllers](../kube-controllers/configuration.mdx).
-
-The `tigera/kube-controllers` container includes the following controllers:
-
-- Policy controller
-- Namespace controller
-- Serviceaccount controller
-- Workloadendpoint controller
-- Node controller
-
-## Typha
-
-**Main task**: Increases scale by reducing each node's impact on the datastore. Runs as a daemon between the datastore and instances of Felix. Installed by default, but not configured. [Typha description](https://github.com/projectcalico/calico/tree/master/typha), and [Typha component](../typha/index.mdx).
-
-Typha maintains a single datastore connection on behalf of all of its clients like Felix and confd. It caches the datastore state and deduplicates events so that they can be fanned out to many listeners. Because one Typha instance can support hundreds of Felix instances, it reduces the load on the datastore by a large factor. And because Typha can filter out updates that are not relevant to Felix, it also reduces Felix's CPU usage. In a high-scale (100+ node) Kubernetes cluster, this is essential because the number of updates generated by the API server scales with the number of nodes.
-
-## calicoctl
-
-**Main task**: Command line interface to create, read, update, and delete {{prodname}} objects. The `calicoctl` command line is available on any host with network access to the {{prodname}} datastore as either a binary or a container. Requires separate installation. [calicoctl](../calicoctl/index.mdx).
-
-## Plugins for cloud orchestrators
-
-**Main task**: Translates the orchestrator APIs for managing networks to the {{prodname}} data-model and datastore.
-
-For cloud providers, {{prodname}} has a separate plugin for each major cloud orchestration platform. This allows {{prodname}} to tightly bind to the orchestrator, so users can manage the {{prodname}} network using their orchestrator tools. When required, the orchestrator plugin provides feedback from the {{prodname}} network to the orchestrator. For example, providing information about Felix liveness, and marking specific endpoints as failed if network setup fails.
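-
-As a quick, illustrative way to see several of these components on a running cluster (the resource names and labels assume a typical manifest-based install in the `kube-system` namespace):
-
-```bash
-# Felix, BIRD, and confd run inside each calico-node pod.
-kubectl get pods -n kube-system -l k8s-app=calico-node
-# kube-controllers (and, if deployed, Typha) run as Deployments.
-kubectl get deployment -n kube-system calico-kube-controllers
-```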
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx
deleted file mode 100644
index ed64e56aa3..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/apply.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-description: Command to apply a policy.
----
-
-# calicoctl apply
-
-This section describes the `calicoctl apply` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx) for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API). Please refer to the [Resources section](../resources/overview.mdx) for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl apply' command
-
-Run `calicoctl apply --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl apply --filename=<FILENAME> [--recursive] [--skip-empty] [--config=<CONFIG>] [--namespace=<NS>]
-
-Examples:
-  # Apply a policy using the data in policy.yaml.
-  calicoctl apply -f ./policy.yaml
-
-  # Apply a policy based on the JSON passed into stdin.
-  cat policy.json | calicoctl apply -f -
-
-Options:
-  -h --help                 Show this screen.
-  -f --filename=<FILENAME>  Filename to use to apply the resource. If set to
-                            "-" loads from stdin. If filename is a directory, this command is
-                            invoked for each .json .yaml and .yml file within that directory,
-                            terminating after the first failure.
-  -R --recursive            Process the filename specified in -f or --filename recursively.
-     --skip-empty           Do not error if any files or directory specified using -f or --filename contain no
-                            data.
-  -c --config=<CONFIG>      Path to the file containing connection
-                            configuration in YAML or JSON format.
-                            [default: /etc/calico/calicoctl.cfg]
-  -n --namespace=<NS>       Namespace of the resource.
-                            Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint.
-                            Uses the default namespace if not specified.
-     --context=<CONTEXT>    The name of the kubeconfig context to use.
-
-Description:
-  The apply command is used to create or replace a set of resources by filename
-  or stdin. JSON and YAML formats are accepted.
-
-  Valid resource types are:
-
-    * bgpConfiguration
-    * bgpPeer
-    * felixConfiguration
-    * globalNetworkPolicy
-    * hostEndpoint
-    * ipPool
-    * networkPolicy
-    * networkSet
-    * node
-    * profile
-    * workloadEndpoint
-
-  When applying a resource:
-  - if the resource does not already exist (as determined by its primary
-    identifiers) then it is created
-  - if the resource already exists then the specification for that resource is
-    replaced in its entirety by the new resource specification.
-
-  The output of the command indicates how many resources were successfully
-  applied, and the error reason if an error occurred.
-
-  The resources are applied in the order they are specified. In the event of a
-  failure applying a specific resource it is possible to work out which
-  resource failed based on the number of resources successfully applied.
-
-  When applying a resource to perform an update, the complete resource spec
-  must be provided, it is not sufficient to supply only the fields that are
-  being updated.
-```
-
-### Examples
-
-1. Apply a set of resources (of mixed type) using the data in resources.yaml.
-
-   ```bash
-   calicoctl apply -f ./resources.yaml
-   ```
-
-   Results indicate that 8 resources were successfully applied.
-
-   ```
-   Successfully applied 8 resource(s)
-   ```
-
-1. Apply two policy resources based on the JSON passed into stdin.
-
-   ```bash
-   cat policy.json | calicoctl apply -f -
-   ```
-
-   Results indicate success.
-
-   ```
-   Successfully applied 2 'policy' resource(s)
-   ```
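-
-Because an update via `apply` must carry the complete resource spec, a common pattern is to round-trip the resource through `calicoctl get`; the resource and file names below are hypothetical:
-
-```bash
-# Export the full spec, edit it, then re-apply it.
-calicoctl get networkpolicy my-policy -n my-namespace -o yaml > policy.yaml
-# ...edit policy.yaml...
-calicoctl apply -f policy.yaml
-```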
-
-### Options
-
-```
--f --filename=<FILENAME>  Filename to use to apply the resource. If set to
-                          "-" loads from stdin.
--n --namespace=<NS>       Namespace of the resource.
-                          Only applicable to NetworkPolicy and WorkloadEndpoint.
-                          Uses the default namespace if not specified.
-```
-
-### General options
-
-```
--c --config=<CONFIG>      Path to the file containing connection
-                          configuration in YAML or JSON format.
-                          [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
-- [Resources](../resources/overview.mdx) for details on all valid resources, including file format and schema
-- [NetworkPolicy](../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx
deleted file mode 100644
index 914388ea44..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/convert.mdx
+++ /dev/null
@@ -1,96 +0,0 @@
----
-description: Command to convert contents of policy.yaml to v3 policy.
----
-
-# calicoctl convert
-
-This section describes the `calicoctl convert` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx) for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API). Please refer to the [Resources section](../resources/overview.mdx) for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl convert' command
-
-Run `calicoctl convert --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl convert --filename=<FILENAME>
-                    [--output=<OUTPUT>] [--ignore-validation]
-
-Examples:
-  # Convert the contents of policy.yaml to a Calico v3 policy.
-  calicoctl convert -f ./policy.yaml -o yaml
-
-  # Convert a policy based on the JSON passed into stdin.
-  cat policy.json | calicoctl convert -f -
-
-Options:
-  -h --help                 Show this screen.
-  -f --filename=<FILENAME>  Filename to use to create the resource. If set to
-                            "-" loads from stdin.
-  -o --output=<OUTPUT>      Output format. One of: yaml or json.
-                            [Default: yaml]
-  --ignore-validation       Skip validation on the converted manifest.
-
-Description:
-  Convert config files from Calico v1 or Kubernetes to Calico v3 API versions. Both YAML and JSON formats are accepted.
-
-  The default output will be printed to stdout in YAML format.
-```
-
-:::note
-
-Currently the only Kubernetes API resource supported for conversion is NetworkPolicy.
-
-:::
-
-### Examples
-
-1. Convert a set of resources (of mixed type) from Calico v1 or Kubernetes to Calico v3 APIs using the data in resources.yaml.
-
-   ```bash
-   calicoctl convert -f multi-resource-v1.yaml -o yaml > multi-resource-v3.yaml
-   ```
-
-   :::tip
-
-   By default the convert command outputs the converted resources to stdout, but it can be redirected to a file.
-
-   :::
-
-1. Convert a policy based on the JSON passed into stdin.
-
-   ```bash
-   cat policy.json | calicoctl convert -f -
-   ```
-
-   The result will be printed to stdout.
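-
-A related, hypothetical variant: if a converted manifest fails validation but you still want to inspect the result, the `--ignore-validation` flag described above can be combined with either output format:
-
-```bash
-calicoctl convert -f legacy-policy.yaml --ignore-validation -o json
-```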
-
-### Options
-
-```
--f --filename=<FILENAME>  Filename to use to convert the resource. If set to
-                          "-" loads from stdin.
--o --output=<OUTPUT>      Output format. One of: yaml or json.
-                          [Default: yaml]
---ignore-validation       Skip validation on the converted manifest.
-```
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
-- [Resources](../resources/overview.mdx) for details on all valid resources, including file format and schema
-- [calicoctl get](get.mdx) for details on the `calicoctl get` command to get the resources.
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/create.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/create.mdx
deleted file mode 100644
index 846bbc3806..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/create.mdx
+++ /dev/null
@@ -1,140 +0,0 @@
----
-description: Command to create a policy.
----
-
-# calicoctl create
-
-This section describes the `calicoctl create` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx) for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API). Please refer to the [Resources section](../resources/overview.mdx) for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl create' command
-
-Run `calicoctl create --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl create --filename=<FILENAME> [--recursive] [--skip-empty] [--skip-exists] [--config=<CONFIG>] [--namespace=<NS>]
-
-Examples:
-  # Create a policy using the data in policy.yaml.
-  calicoctl create -f ./policy.yaml
-
-  # Create a policy based on the JSON passed into stdin.
-  cat policy.json | calicoctl create -f -
-
-Options:
-  -h --help                 Show this screen.
-  -f --filename=<FILENAME>  Filename to use to create the resource. If set to
-                            "-" loads from stdin. If filename is a directory, this command is
-                            invoked for each .json .yaml and .yml file within that directory,
-                            terminating after the first failure.
-  -R --recursive            Process the filename specified in -f or --filename recursively.
-     --skip-empty           Do not error if any files or directory specified using -f or --filename contain no
-                            data.
-     --skip-exists          Skip over and treat as successful any attempts to
-                            create an entry that already exists.
-  -c --config=<CONFIG>      Path to the file containing connection
-                            configuration in YAML or JSON format.
-                            [default: /etc/calico/calicoctl.cfg]
-  -n --namespace=<NS>       Namespace of the resource.
-                            Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint.
-                            Uses the default namespace if not specified.
-     --context=<CONTEXT>    The name of the kubeconfig context to use.
-
-Description:
-  The create command is used to create a set of resources by filename or stdin.
-  JSON and YAML formats are accepted.
-
-  Valid resource types are:
-
-    * bgpConfiguration
-    * bgpPeer
-    * felixConfiguration
-    * globalNetworkPolicy
-    * hostEndpoint
-    * ipPool
-    * networkPolicy
-    * networkSet
-    * node
-    * profile
-    * workloadEndpoint
-
-  Attempting to create a resource that already exists is treated as a
-  terminating error unless the --skip-exists flag is set. If this flag is set,
-  resources that already exist are skipped.
-
-  The output of the command indicates how many resources were successfully
-  created, and the error reason if an error occurred. If the --skip-exists
-  flag is set then skipped resources are included in the success count.
-
-  The resources are created in the order they are specified. In the event of a
-  failure creating a specific resource it is possible to work out which
-  resource failed based on the number of resources successfully created.
-```
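-
-Because attempting to create an existing resource is a terminating error (see the second example below), bootstrap scripts that may run more than once typically pass `--skip-exists`; an illustrative invocation:
-
-```bash
-calicoctl create -f ./resources.yaml --skip-exists
-```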
-
-### Examples
-
-1. Create a set of resources (of mixed type) using the data in resources.yaml.
-
-   ```bash
-   calicoctl create -f ./resources.yaml
-   ```
-
-   Results indicate that 8 resources were successfully created.
-
-   ```
-   Successfully created 8 resource(s)
-   ```
-
-1. Create the same set of resources reading from stdin.
-
-   ```bash
-   cat resources.yaml | calicoctl create -f -
-   ```
-
-   Results indicate failure because the first resource (in this case a Profile) already exists.
-
-   ```
-   Failed to create any resources: resource already exists: Profile(name=profile1)
-   ```
-
-### Options
-
-```
--f --filename=<FILENAME>  Filename to use to create the resource. If set to
-                          "-" loads from stdin.
---skip-exists             Skip over and treat as successful any attempts to
-                          create an entry that already exists.
--n --namespace=<NS>       Namespace of the resource.
-                          Only applicable to NetworkPolicy and WorkloadEndpoint.
-                          Uses the default namespace if not specified.
-```
-
-### General options
-
-```
--c --config=<CONFIG>      Path to the file containing connection
-                          configuration in YAML or JSON format.
-                          [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
-- [Resources](../resources/overview.mdx) for details on all valid resources, including file format and schema
-- [NetworkPolicy](../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/index.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/index.mdx
deleted file mode 100644
index b8957eb3c9..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: calicoctl datastore commands.
-hide_table_of_contents: true
----
-
-# datastore
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/export.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/export.mdx
deleted file mode 100644
index 8970f095cb..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/export.mdx
+++ /dev/null
@@ -1,101 +0,0 @@
----
-description: Command and options for exporting an etcdv3 datastore.
----
-
-# calicoctl datastore migrate export
-
-This section describes the `calicoctl datastore migrate export` command.
-
-Read the [calicoctl Overview](../../overview.mdx) for a full list of calicoctl commands.
-
-## Display the help text for 'calicoctl datastore migrate export' command
-
-Run `calicoctl datastore migrate export --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl datastore migrate export [--config=<CONFIG>]
-
-Options:
-  -h --help             Show this screen.
-  -c --config=<CONFIG>  Path to the file containing connection
-                        configuration in YAML or JSON format.
-                        [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  Export the contents of the etcdv3 datastore. Resources will be exported
-  in yaml and json format. Save the results of this command to a file for
-  later use with the import command.
-
-  The resources exported include the following:
-  - IPAMBlocks
-  - BlockAffinities
-  - IPAMHandles
-  - IPAMConfigurations
-  - IPPools
-  - BGPConfigurations
-  - BGPPeers
-  - ClusterInformations
-  - FelixConfigurations
-  - GlobalNetworkPolicies
-  - GlobalNetworkSets
-  - HostEndpoints
-  - KubeControllersConfigurations
-  - NetworkPolicies
-  - NetworkSets
-  - Nodes
-
-  The following resources are not exported:
-  - WorkloadEndpoints
-  - Profiles
-```
-
-### Exported resources
-
-The `export` subcommand exports the following Calico resources:
-
-- IPAMBlocks
-- BlockAffinities
-- IPAMHandles
-- IPAMConfigurations
-- IPPools
-- BGPConfigurations
-- BGPPeers
-- ClusterInformations
-- FelixConfigurations
-- GlobalNetworkPolicies
-- GlobalNetworkSets
-- HostEndpoints
-- NetworkPolicies
-- NetworkSets
-- Nodes
-
-The `export` subcommand does not export the following resources, since they should be generated:
-
-- WorkloadEndpoints
-- Profiles
-
-### Examples
-
-Export the contents of an etcdv3 datastore to a file named `etcd-migration`.
-
-```bash
-calicoctl datastore migrate export > etcd-migration
-```
-
-### General options
-
-```
--c --config=<CONFIG>  Path to the file containing connection
-                      configuration in YAML or JSON format.
-                      [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Install calicoctl](../../../../operations/calicoctl/install.mdx)
-- [Resources](../../../resources/overview.mdx) for details on all valid resources, including file format and schema
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/import.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/import.mdx
deleted file mode 100644
index 7725629146..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/import.mdx
+++ /dev/null
@@ -1,61 +0,0 @@
----
-description: Command and options for importing exported data to a kubernetes datastore.
----
-
-# calicoctl datastore migrate import
-
-This section describes the `calicoctl datastore migrate import` command.
-
-Read the [calicoctl Overview](../../overview.mdx) for a full list of calicoctl commands.
-
-## Display the help text for 'calicoctl datastore migrate import' command
-
-Run `calicoctl datastore migrate import --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl datastore migrate import --filename=<FILENAME> [--config=<CONFIG>]
-
-Options:
-  -h --help                 Show this screen.
-  -f --filename=<FILENAME>  Filename to use to import resources. If set to
-                            "-" loads from stdin.
-  -c --config=<CONFIG>      Path to the file containing connection
-                            configuration in YAML or JSON format.
-                            [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  Import the contents of the etcdv3 datastore from the file created by the
-  export command.
-```
-
-### Examples
-
-Import the contents of an etcdv3 datastore stored in a file named `etcd-migration`.
-
-```bash
-calicoctl datastore migrate import -f etcd-migration
-```
-
-### Options
-
-```
--f --filename=<FILENAME>  Filename to use to import resources. If set to
-                          "-" loads from stdin.
-```
-
-### General options
-
-```
--c --config=<CONFIG>  Path to the file containing connection
-                      configuration in YAML or JSON format.
-                      [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Install calicoctl](../../../../operations/calicoctl/install.mdx)
-- [Resources](../../../resources/overview.mdx) for details on all valid resources, including file format and schema
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/index.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/index.mdx
deleted file mode 100644
index 2aa67cd9f3..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: calicoctl datastore migrate commands.
-hide_table_of_contents: true
----
-
-# Migrate
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/lock.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/lock.mdx
deleted file mode 100644
index 24f1fc19e3..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/lock.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
----
-description: Command and options for locking a datastore for migration.
----
-
-# calicoctl datastore migrate lock
-
-This section describes the `calicoctl datastore migrate lock` command.
-
-Read the [calicoctl Overview](../../overview.mdx) for a full list of calicoctl commands.
-
-## Display the help text for 'calicoctl datastore migrate lock' command
-
-Run `calicoctl datastore migrate lock --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl datastore migrate lock [--config=<CONFIG>]
-
-Options:
-  -h --help             Show this screen.
-  -c --config=<CONFIG>  Path to the file containing connection
-                        configuration in YAML or JSON format.
-                        [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  Lock the datastore to prepare it for migration. This prevents any new
-  Calico resources from affecting the cluster but does not prevent updating
-  or creating new Calico resources.
-```
-
-### Examples
-
-Lock the datastore to prepare it for migration so that any changes to the data will not affect the cluster during migration.
-
-```bash
-calicoctl datastore migrate lock
-```
-
-### General options
-
-```
--c --config=<CONFIG>  Path to the file containing connection
-                      configuration in YAML or JSON format.
-                      [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Install calicoctl](../../../../operations/calicoctl/install.mdx)
-- [Resources](../../../resources/overview.mdx) for details on all valid resources, including file format and schema
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/overview.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/overview.mdx
deleted file mode 100644
index 5cb1ac66e9..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/overview.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-description: Commands for calicoctl datastore migrate.
----
-
-# calicoctl datastore migrate
-
-This section describes the `calicoctl datastore migrate` commands.
-
-Read the [calicoctl Overview](../../overview.mdx) for a full list of calicoctl commands.
-
-## Display the help text for 'calicoctl datastore migrate' commands
-
-Run `calicoctl datastore migrate --help` to display the following help menu for the commands.
-
-```
-Usage:
-  calicoctl datastore migrate <command> [<args>...]
-
-    export  Export the contents of the etcdv3 datastore to yaml.
-    import  Store and convert yaml of resources into the Kubernetes datastore.
-    lock    Lock the datastore to prevent changes from occurring during datastore migration.
-    unlock  Unlock the datastore to allow changes once the migration is completed.
-
-Options:
-  -h --help  Show this screen.
-
-Description:
-  Migration specific commands for calicoctl.
-
-  See 'calicoctl datastore migrate <command> --help' to read about a specific subcommand.
-```
-
-## Migrate specific commands
-
-Details on the `calicoctl datastore migrate` commands are described in the documents linked below, organized by sub command.
-
-- [calicoctl datastore migrate export](export.mdx)
-- [calicoctl datastore migrate import](import.mdx)
-- [calicoctl datastore migrate lock](lock.mdx)
-- [calicoctl datastore migrate unlock](unlock.mdx)
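-
-Putting the four subcommands together, an abridged end-to-end migration could look like the following; it assumes `calicoctl` is pointed at the etcdv3 datastore for the first two steps and reconfigured for the target Kubernetes datastore before the import:
-
-```bash
-# Freeze the etcdv3 datastore so data cannot change mid-migration.
-calicoctl datastore migrate lock
-# Export all resources to a file.
-calicoctl datastore migrate export > etcd-migration
-# (Reconfigure calicoctl for the Kubernetes datastore here.)
-calicoctl datastore migrate import -f etcd-migration
-# Allow Calico resources to take effect in the new datastore.
-calicoctl datastore migrate unlock
-```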
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/unlock.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/unlock.mdx
deleted file mode 100644
index 96128a908e..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/migrate/unlock.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-description: Command and options for unlocking a datastore after migration.
----
-
-# calicoctl datastore migrate unlock
-
-This section describes the `calicoctl datastore migrate unlock` command.
-
-Read the [calicoctl Overview](../../overview.mdx) for a full list of calicoctl commands.
-
-## Display the help text for 'calicoctl datastore migrate unlock' command
-
-Run `calicoctl datastore migrate unlock --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl datastore migrate unlock [--config=<CONFIG>]
-
-Options:
-  -h --help             Show this screen.
-  -c --config=<CONFIG>  Path to the file containing connection
-                        configuration in YAML or JSON format.
-                        [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  Unlock the datastore to complete migration. This once again allows
-  Calico resources to take effect in the cluster.
-```
-
-### Examples
-
-Unlock the datastore after migration to allow the Calico resources to affect the cluster.
-
-```bash
-calicoctl datastore migrate unlock
-```
-
-### General options
-
-```
--c --config=<CONFIG>  Path to the file containing connection
-                      configuration in YAML or JSON format.
-                      [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Install calicoctl](../../../../operations/calicoctl/install.mdx)
-- [Resources](../../../resources/overview.mdx) for details on all valid resources, including file format and schema
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/overview.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/overview.mdx
deleted file mode 100644
index f1975d552f..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/datastore/overview.mdx
+++ /dev/null
@@ -1,37 +0,0 @@
----
-description: Commands for calicoctl datastore
----
-
-# calicoctl datastore
-
-This section describes the `calicoctl datastore` commands.
-
-Read the [calicoctl Overview](../overview.mdx) for a full list of calicoctl commands.
-
-## Display the help text for 'calicoctl datastore' commands
-
-Run `calicoctl datastore --help` to display the following help menu for the commands.
-
-```
-Usage:
-  calicoctl datastore <command> [<args>...]
-
-    migrate  Migrate the contents of an etcdv3 datastore to a Kubernetes datastore.
-
-Options:
-  -h --help  Show this screen.
-
-Description:
-  Datastore specific commands for calicoctl.
-
-  See 'calicoctl datastore <command> --help' to read about a specific subcommand.
-```
-
-## Datastore specific commands
-
-Details on the `calicoctl datastore` commands are described in the documents linked below, organized by sub command.
-
-- [calicoctl datastore migrate](migrate/overview.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/delete.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/delete.mdx
deleted file mode 100644
index 829a324591..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/delete.mdx
+++ /dev/null
@@ -1,150 +0,0 @@
----
-description: Command to delete a policy.
----
-
-# calicoctl delete
-
-This section describes the `calicoctl delete` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx) for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API). Please refer to the [Resources section](../resources/overview.mdx) for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl delete' command
-
-Run `calicoctl delete --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl delete ( (<KIND> [<NAME>...]) |
-                   --filename=<FILENAME>) [--recursive] [--skip-empty]
-                   [--skip-not-exists] [--config=<CONFIG>] [--namespace=<NS>]
-
-Examples:
-  # Delete a policy using the type and name specified in policy.yaml.
-  calicoctl delete -f ./policy.yaml
-
-  # Delete a policy based on the type and name in the YAML passed into stdin.
-  cat policy.yaml | calicoctl delete -f -
-
-  # Delete policies with names "foo" and "bar"
-  calicoctl delete policy foo bar
-
-Options:
-  -h --help                 Show this screen.
-  -s --skip-not-exists      Skip over and treat as successful, resources that
-                            don't exist.
-  -f --filename=<FILENAME>  Filename to use to delete the resource. If set to
-                            "-" loads from stdin. If filename is a directory, this command is
-                            invoked for each .json .yaml and .yml file within that directory,
-                            terminating after the first failure.
-  -R --recursive            Process the filename specified in -f or --filename recursively.
-     --skip-empty           Do not error if any files or directory specified using -f or --filename contain no
-                            data.
-  -c --config=<CONFIG>      Path to the file containing connection
-                            configuration in YAML or JSON format.
-                            [default: /etc/calico/calicoctl.cfg]
-  -n --namespace=<NS>       Namespace of the resource.
-                            Only applicable to NetworkPolicy and WorkloadEndpoint.
-                            Uses the default namespace if not specified.
-     --context=<CONTEXT>    The name of the kubeconfig context to use.
-
-Description:
-  The delete command is used to delete a set of resources by filename or stdin,
-  or by type and identifiers. JSON and YAML formats are accepted for file and
-  stdin format.
-
-  Valid resource types are:
-
-    * bgpConfiguration
-    * bgpPeer
-    * felixConfiguration
-    * globalNetworkPolicy
-    * hostEndpoint
-    * ipPool
-    * networkPolicy
-    * node
-    * profile
-    * workloadEndpoint
-
-  The resource type is case-insensitive and may be pluralized.
-
-  Attempting to delete a resource that does not exist is treated as a
-  terminating error unless the --skip-not-exists flag is set. If this flag is
-  set, resources that do not exist are skipped.
-
-  When deleting resources by type, only a single type may be specified at a
-  time. The name is required along with any other identifiers required to
-  uniquely identify a resource of the specified type.
-
-  The output of the command indicates how many resources were successfully
-  deleted, and the error reason if an error occurred. If the --skip-not-exists
-  flag is set then skipped resources are included in the success count.
-
-  The resources are deleted in the order they are specified. In the event of a
-  failure deleting a specific resource it is possible to work out which
-  resource failed based on the number of resources successfully deleted.
-```
-
-### Examples
-
-1. Delete a set of resources (of mixed type) using the data in resources.yaml.
-
-   ```bash
-   calicoctl delete -f ./resources.yaml
-   ```
-
-   Results indicate that 8 resources were successfully deleted.
-
-   ```
-   Successfully deleted 8 resource(s)
-   ```
-
-1. Delete a policy resource by name. The policy is called "policy1".
-
-   ```bash
-   calicoctl delete policy policy1
-   ```
-
-   Results indicate success.
-
-   ```
-   Successfully deleted 1 'policy' resource(s)
-   ```
-
-### Options
-
-```
--s --skip-not-exists      Skip over and treat as successful, resources that
-                          don't exist.
--f --filename=<FILENAME>  Filename to use to delete the resource. If set to
-                          "-" loads from stdin.
--n --namespace=<NS>       Namespace of the resource.
-                          Only applicable to NetworkPolicy and WorkloadEndpoint.
-                          Uses the default namespace if not specified.
-```
-
-### General options
-
-```
--c --config=<CONFIG>      Path to the file containing connection
-                          configuration in YAML or JSON format.
-                          [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
- [Resources](../resources/overview.mdx) for details on all valid resources, including file format and schema
- [NetworkPolicy](../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/get.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/get.mdx
deleted file mode 100644
index 48147f79a7..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/get.mdx
+++ /dev/null
@@ -1,272 +0,0 @@
----
-description: Command to list policies in the default output format.
----
-
-# calicoctl get
-
-This section describes the `calicoctl get` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx) for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API). Please refer to the [Resources section](../resources/overview.mdx) for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl get' command
-
-Run `calicoctl get --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl get ( (<KIND> [<NAME>...]) |
-                --filename=<FILENAME>) [--recursive] [--skip-empty]
-                [--output=<OUTPUT>] [--config=<CONFIG>] [--namespace=<NS>] [--all-namespaces]
-
-Examples:
-  # List all policy in default output format.
-  calicoctl get policy
-
-  # List specific policies in YAML format
-  calicoctl get -o yaml policy my-policy-1 my-policy-2
-
-Options:
-  -h --help                 Show this screen.
-  -f --filename=<FILENAME>  Filename to use to get the resource. If set to
-                            "-" loads from stdin. If filename is a directory, this command is
-                            invoked for each .json .yaml and .yml file within that directory,
-                            terminating after the first failure.
-  -R --recursive            Process the filename specified in -f or --filename recursively.
-     --skip-empty           Do not error if any files or directory specified using -f or --filename contain no
-                            data.
-  -o --output=<OUTPUT>      Output format. One of: yaml, json, ps, wide,
-                            custom-columns=..., go-template=...,
-                            go-template-file=... [Default: ps]
-  -c --config=<CONFIG>      Path to the file containing connection
-                            configuration in YAML or JSON format.
-                            [default: /etc/calico/calicoctl.cfg]
-  -n --namespace=<NS>       Namespace of the resource.
-                            Only applicable to NetworkPolicy and WorkloadEndpoint.
-                            Uses the default namespace if not specified.
-  -A --all-namespaces       If present, list the requested object(s) across
-                            all namespaces.
-     --export               If present, returns the requested object(s) stripped of
-                            cluster-specific information. This flag will be ignored
-                            if <NAME> is not specified.
-     --context=<CONTEXT>    The name of the kubeconfig context to use.
-
-Description:
-  The get command is used to display a set of resources by filename or stdin,
-  or by type and identifiers. JSON and YAML formats are accepted for file and
-  stdin format.
-
-  Valid resource types are:
-
-    * bgpConfiguration
-    * bgpPeer
-    * felixConfiguration
-    * globalNetworkPolicy
-    * hostEndpoint
-    * ipPool
-    * networkPolicy
-    * networkSet
-    * node
-    * profile
-    * workloadEndpoint
-
-  The resource type is case-insensitive and may be pluralized.
-
-  Attempting to get resources that do not exist will simply return no results.
-
-  When getting resources by type, only a single type may be specified at a
-  time. The name and other identifiers (hostname, scope) are optional, and are
-  wildcarded when omitted. Thus if you specify no identifiers at all (other
-  than type), then all configured resources of the requested type will be
-  returned.
-
-  By default the results are output in a ps-style table output. There are
-  alternative ways to display the data using the --output option:
-
-    ps                    Display the results in ps-style output.
-    wide                  As per the ps option, but includes more headings.
-    custom-columns        As per the ps option, but only display the columns
-                          that are requested in the comma-separated list.
-    golang-template       Display the results using the specified golang
-                          template. This can be used to filter results, for
-                          example to return a specific value.
-    golang-template-file  Display the results using the golang template that is
-                          contained in the specified file.
-    yaml                  Display the results in YAML output format.
-    json                  Display the results in JSON output format.
-
-  Note that the data output using YAML or JSON format is always valid to use as
-  input to all of the resource management commands (create, apply, replace,
-  delete, get).
-
-  Please refer to the docs at https://projectcalico.docs.tigera.io for more details on
-  the output formats, including example outputs, resource structure (required
-  for the golang template definitions) and the valid column names (required for
-  the custom-columns option).
-```
-
-### Options
-
-```
--h --help                 Show this screen.
--f --filename=<FILENAME>  Filename to use to get the resource. If set to
-                          "-" loads from stdin.
--o --output=<OUTPUT>      Output format. One of: yaml, json, ps, wide,
-                          custom-columns=..., go-template=...,
-                          go-template-file=... [Default: ps]
--n --namespace=<NS>       Namespace of the resource.
-                          Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint.
-                          Uses the default namespace if not specified.
--A --all-namespaces       If present, list the requested object(s) across
-                          all namespaces.
---export                  If present, returns the requested object(s) stripped of
-                          cluster-specific information. This flag will be ignored
-                          if the resource name is not specified.
-```
-
-### General options
-
-```
--c --config=<CONFIG>      Path to the file containing connection
-                          configuration in YAML or JSON format.
-                          [default: /etc/calico/calicoctl.cfg]
-```
-
-### Output options
-
-#### `ps`
-
-This is the default output format. It displays output in ps-style table output with sufficient columns to uniquely identify the resource.
-
-The headings displayed for each resource type are fixed. However, the `wide` option displays additional columns, and `custom-columns` lets you select which columns to display.
-
-Example:
-
-```bash
-calicoctl get hostEndpoint
-```
-
-Response:
-
-```
-NAME          NODE
-endpoint1     host1
-myhost-eth0   myhost
-```
-
-#### `wide`
-
-Similar to the `ps` format, the `wide` option displays output in ps-style table output but with additional columns.
-
-The headings displayed for each resource type are fixed. See `custom-columns` for selecting which columns to display.
-
-Example:
-
-```bash
-calicoctl get hostEndpoint --output=wide
-```
-
-Response:
-
-```
-NAME          NODE     INTERFACE   IPS                PROFILES
-endpoint1     host1                1.2.3.4,0:bb::aa   prof1,prof2
-myhost-eth0   myhost                                  profile1
-```
-
-#### `custom-columns`
-
-Similar to the `ps` format, the `custom-columns` option displays output in ps-style table output but allows the user to specify an ordered, comma-separated list of columns to display in the output. The valid heading names for each resource type are documented in the [Resources](../resources/overview.mdx) guide.
-
-Example:
-
-```
-calicoctl get hostEndpoint --output=custom-columns=NAME,IPS
-```
-
-Response:
-
-```
-NAME          IPS
-endpoint1     1.2.3.4,0:bb::aa
-myhost-eth0
-```
-
-#### `yaml / json`
-
-The `yaml` and `json` options display the output as a list of YAML documents or JSON dictionaries. The fields for each resource type are documented in the [Resources](../resources/overview.mdx) guide.
-
-The output from either of these formats may be used as input for all of the resource management commands.
-
-Example:
-
-```bash
-calicoctl get hostEndpoint --output=yaml
-```
-
-Response:
-
-```yaml
-- apiVersion: projectcalico.org/v3
-  kind: HostEndpoint
-  metadata:
-    labels:
-      type: database
-    name: endpoint1
-  spec:
-    node: host1
-    expectedIPs:
-      - 1.2.3.4
-      - 0:bb::aa
-    profiles:
-      - prof1
-      - prof2
-- apiVersion: projectcalico.org/v3
-  kind: HostEndpoint
-  metadata:
-    name: myhost-eth0
-  spec:
-    node: myhost
-    profiles:
-      - profile1
-```
-
-#### `go-template / go-template-file`
-
-The `go-template` and `go-template-file` options display the output using a golang template specified as a string on the CLI, or defined in a separate file.
-When writing a template, be aware that the data passed to the template is a golang slice of resource-lists. The resource-lists are defined in the [libcalico API](../resources/overview.mdx) and there is a resource-list defined for each resource type. A resource-list contains an Items field which is itself a slice of resources. Thus, to output the "Name" field from the supplied data, it is necessary to enumerate over the slice of resource-lists and the items within that list.
-
-Example:
-
-```bash
-calicoctl get hostEndpoint --output=go-template="{{range .}}{{range .Items}}{{.ObjectMeta.Name}},{{end}}{{end}}"
-endpoint1,eth0,
-```
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
-- [Resources](../resources/overview.mdx) for details on all valid resources, including file format and schema
-- [NetworkPolicy](../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/index.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/index.mdx
deleted file mode 100644
index 710f6f7fb0..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/index.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-
-title: calicoctl
-hide_table_of_contents: true
-
----
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-Optional command line interface (CLI) to manage Calico resources.
-
-The calicoctl CLI tool allows management of Calico API resources, and can be used to perform other administrative tasks for managing a Calico installation.
-
-You can use kubectl to manage Calico resources instead by [installing the Calico API server](../../operations/install-apiserver.mdx).
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/check.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/check.mdx
deleted file mode 100644
index c48cfa5959..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/check.mdx
+++ /dev/null
@@ -1,68 +0,0 @@
----
-description: Command to check IPAM status
----
-
-# calicoctl ipam check
-
-This section describes the `calicoctl ipam check` command.
-
-Read the [calicoctl overview](../overview.mdx) for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl ipam check' command
-
-Run `calicoctl ipam check --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl ipam check [--config=<CONFIG>] [--show-all-ips] [--show-problem-ips] [-o <FILE>]
-
-Options:
-  -h --help             Show this screen.
-  -o --output=<FILE>    Path to output report file.
-  --show-all-ips        Print all IPs that are checked.
-  --show-problem-ips    Print all IPs that are leaked or not allocated properly.
-  -c --config=<CONFIG>  Path to the file containing connection configuration in
-                        YAML or JSON format.
-                        [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  The ipam check command checks the integrity of the IPAM data structures against Kubernetes.
-```
-
-### Examples
-
-Example workflow for checking consistency and releasing leaked addresses.
-
-**Lock the data store**
-
-```bash
-calicoctl datastore migrate lock
-```
-
-:::note
-
-Once the data store is locked, new pods will not be able to be launched until the data store is unlocked.
-
-:::
-
-**Generate a report using the check command**
-
-```bash
-calicoctl ipam check -o report.json
-```
-
-**Release any unnecessary addresses**
-
-```bash
-calicoctl ipam release --from-report report.json
-```
-
-**Unlock the data store**
-
-```bash
-calicoctl datastore migrate unlock
-```
-
-## See also
-
-- [Installing calicoctl](../../../operations/calicoctl/install.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/configure.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/configure.mdx
deleted file mode 100644
index 6220e1840f..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/configure.mdx
+++ /dev/null
@@ -1,48 +0,0 @@
----
-description: Command to change IPAM configuration.
----
-
-# calicoctl ipam configure
-
-This section describes the `calicoctl ipam configure` command.
-
-Read the [calicoctl overview](../overview.mdx) for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl ipam configure' command
-
-Run `calicoctl ipam configure --help` to display the following help menu for the command.
-
-```
-Usage:
-  calicoctl ipam configure --strictaffinity=<true/false> [--config=<CONFIG>]
-
-Options:
-  -h --help                       Show this screen.
-  --strictaffinity=<true/false>   Set StrictAffinity to true/false. When StrictAffinity
-                                  is true, borrowing IP addresses is not allowed.
-  -c --config=<CONFIG>            Path to the file containing connection configuration in
-                                  YAML or JSON format.
-                                  [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  Modify configuration for Calico IP address management.
-```
-
-### Examples
-
-```bash
-calicoctl ipam configure --strictaffinity=true
-```
-
-### General options
-
-```
--c --config=<CONFIG>  Path to the file containing connection
-                      configuration in YAML or JSON format.
-                      [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../../operations/calicoctl/install.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/index.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/index.mdx
deleted file mode 100644
index 1049df9db0..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: calicoctl IPAM commands for Calico-assigned IP addresses.
-hide_table_of_contents: true
----
-
-# ipam
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/overview.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/overview.mdx
deleted file mode 100644
index 9eed90a72c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/overview.mdx
+++ /dev/null
@@ -1,41 +0,0 @@
----
-description: Commands for calicoctl IP address management (IPAM).
----
-
-# calicoctl ipam
-
-This section describes the `calicoctl ipam` commands.
-
-Read the [calicoctl Overview](../overview.mdx) for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl ipam' commands
-
-Run `calicoctl ipam --help` to display the following help menu for the commands.
-
-```
-Usage:
-  calicoctl ipam <command> [<args>...]
-
-    release    Release a Calico assigned IP address.
-    show       Show details of a Calico assigned IP address,
-               or of overall IP usage.
-    configure  Configure IPAM
-
-Options:
-  -h --help  Show this screen.
-
-Description:
-  IP Address Management specific commands for calicoctl.
-
-  See 'calicoctl ipam <command> --help' to read about a specific subcommand.
-```
-
-## IPAM specific commands
-
-Details on the `calicoctl ipam` commands are described in the documents linked below, organized by sub command.
-
-- [calicoctl ipam release](release.mdx)
-- [calicoctl ipam show](show.mdx)
-- [calicoctl ipam configure](configure.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/release.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/release.mdx
deleted file mode 100644
index 9a4ac15b45..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/release.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
----
-description: Command to release an IP address from Calico IP management.
----
-
-# calicoctl ipam release
-
-This section describes the `calicoctl ipam release` command.
-
-Read the [calicoctl overview](../overview.mdx) for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl ipam release' command
-
-Run `calicoctl ipam release --help` to display the following help menu for the
-command.
-
-```
-Usage:
-  calicoctl ipam release [--ip=<IP>] [--from-report=<REPORT>] [--config=<CONFIG>]
-
-Options:
-  -h --help               Show this screen.
-  --ip=<IP>               IP address to release.
-  --from-report=<REPORT>  Release all leaked addresses from the report.
-  -c --config=<CONFIG>    Path to the file containing connection configuration in
-                          YAML or JSON format.
-                          [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  The ipam release command releases an IP address from the Calico IP Address
-  Manager that was previously assigned to an endpoint. When an IP address
-  is released, it becomes available for assignment to any endpoint.
-
-  Note that this does not remove the IP from any existing endpoints that may be
-  using it, so only use this command to clean up addresses from endpoints that
-  were not cleanly removed from Calico.
-```
-
-### Examples
-
-```bash
-calicoctl ipam release --ip=192.168.1.2
-```
-
-```bash
-calicoctl ipam release --from-report=./report.json
-```
-
-### General options
-
-```
--c --config=<CONFIG>      Path to the file containing connection
-                          configuration in YAML or JSON format.
-                          [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../../operations/calicoctl/install.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/show.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/show.mdx
deleted file mode 100644
index 0f09dd3707..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/show.mdx
+++ /dev/null
@@ -1,154 +0,0 @@
----
-description: Command to see if an IP address is being used.
----
-
-# calicoctl ipam show
-
-This section describes the `calicoctl ipam show` command.
-
-Read the [calicoctl Overview](../overview.mdx) for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl ipam show' command
-
-Run `calicoctl ipam show --help` to display the following help menu for the
-command.
-
-```
-Usage:
-  calicoctl ipam show [--ip=<IP> | --show-blocks] [--config=<CONFIG>]
-
-Options:
-  -h --help             Show this screen.
-  --ip=<IP>             Report whether this specific IP address is in use.
-  --show-blocks         Show detailed information for IP blocks as well as pools.
-  --show-borrowed       Show detailed information for "borrowed" IP addresses.
-  --show-configuration  Show current Calico IPAM configuration.
-  -c --config=<CONFIG>  Path to the file containing connection
-                        configuration in YAML or JSON format.
-                        [default: /etc/calico/calicoctl.cfg]
-
-Description:
-  The ipam show command prints information about a given IP address, or about
-  overall IP usage.
-```
-
-### Examples
-
-1. Print the information associated with an IP address.
-
-   ```bash
-   calicoctl ipam show --ip=192.168.1.2
-   ```
-
-   The following result indicates that the IP is not assigned to an endpoint.
-
-   ```
-   IP 192.168.1.2 is not currently assigned
-   ```
-
-1. Print the information associated with a different IP address.
-
-   ```bash
-   calicoctl ipam show --ip=10.244.118.70
-   ```
-
-   For a Kubernetes pod IP, attributes indicate the pod name and namespace:
-
-   ```
-   IP 10.244.118.70 is in use
-   Attributes:
-     pod: nano-66d4c99f8b-jm5s9
-     namespace: default
-     node: ip-172-16-101-160.us-west-2.compute.internal
-   ```
-
-1. Print a summary of IP usage.
- - ```bash - calicoctl ipam show - ``` - - The table shows usage for each IP Pool: - - ``` - +----------+-------------------+------------+------------+-------------------+ - | GROUPING | CIDR | IPS TOTAL | IPS IN USE | IPS FREE | - +----------+-------------------+------------+------------+-------------------+ - | IP Pool | 10.65.0.0/16 | 65536 | 5 (0%) | 65531 (100%) | - | IP Pool | fd5f:abcd:64::/48 | 1.2089e+24 | 7 (0%) | 1.2089e+24 (100%) | - +----------+-------------------+------------+------------+-------------------+ - ``` - -1. Print more detailed IP usage by blocks. - - ```bash - calicoctl ipam show --show-blocks - ``` - - As well as the total usage per IP Pool, the table shows usage for block that has been allocated from those pools: - - ``` - +----------+-------------------------------------------+------------+------------+-------------------+ - | GROUPING | CIDR | IPS TOTAL | IPS IN USE | IPS FREE | - +----------+-------------------------------------------+------------+------------+-------------------+ - | IP Pool | 10.65.0.0/16 | 65536 | 5 (0%) | 65531 (100%) | - | Block | 10.65.79.0/26 | 64 | 5 (8%) | 59 (92%) | - | IP Pool | fd5f:abcd:64::/48 | 1.2089e+24 | 7 (0%) | 1.2089e+24 (100%) | - | Block | fd5f:abcd:64:4f2c:ec1b:27b9:1989:77c0/122 | 64 | 7 (11%) | 57 (89%) | - +----------+-------------------------------------------+------------+------------+-------------------+ - ``` - -1. Print more detailed information about borrowed IP addresses. - - ```bash - calicoctl ipam show --show-borrowed - ``` - - Table shows which IP addresses have been borrowed by which node out of which block and the entity consuming it: - - ``` - +------------+-----------------+---------------+---------------+------+------------------------------------+ - | IP | BORROWING-NODE | BLOCK | BLOCK OWNER | TYPE | ALLOCATED-TO | - +------------+-----------------+---------------+---------------+------+------------------------------------+ - | 172.16.0.1 | worker-node-1 | 172.16.0.0/29 | worker-node-2 | pod | external-ns/nginx-6db489d4b7-gln7h | - | 172.16.0.2 | worker-node-3 | 172.16.0.0/29 | worker-node-2 | pod | external-ns/nginx-6db489d4b7-kzkbv | - +------------+-----------------+---------------+---------------+------+------------------------------------+ - ``` - -1. Print current IPAM configuration. - - ```bash - calicoctl ipam show --show-configuration - ``` - - Table shows current IPAM configuration: - - ``` - +--------------------+-------+ - | PROPERTY | VALUE | - +--------------------+-------+ - | StrictAffinity | false | - | AutoAllocateBlocks | true | - +--------------------+-------+ - ``` - -### Options - -``` ---ip= Specific IP address to show. ---show-blocks Show detailed information for IP blocks as well as pools. ---show-borrowed Show detailed information for "borrowed" IP addresses. ---show-configuration Show current Calico IPAM configuration -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. 
-                          [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../../operations/calicoctl/install.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/split.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/split.mdx
deleted file mode 100644
index ea2e6881f9..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/ipam/split.mdx
+++ /dev/null
@@ -1,84 +0,0 @@
----
-description: Command and options for splitting an existing IP pool
----
-
-# calicoctl ipam split
-
-This section describes the `calicoctl ipam split` command.
-
-Read the [calicoctl Overview](../overview.mdx)
-for a full list of calicoctl commands.
-
-## Displaying the help text for `calicoctl ipam split` command
-
-Run `calicoctl ipam split --help` to display the following help menu for the command.
-
-```
-Usage:
-  ipam split [--cidr=<CIDR>] [--name=<NAME>] [--config=<CONFIG>] [--allow-version-mismatch]
-
-Options:
-  -h --help                  Show this screen.
-  -c --config=<CONFIG>       Path to the file containing connection configuration in
-                             YAML or JSON format.
-                             [default: /etc/calico/calicoctl.cfg]
-  --cidr=<CIDR>              CIDR of the IP pool to split.
-  --name=<NAME>              Name of the IP pool to split.
-  --allow-version-mismatch   Allow client and cluster versions mismatch.
-
-Description:
-  The ipam split command splits the IP pool specified by CIDR or name
-  into the specified number of smaller IP pools. Each child IP pool will be of equal
-  size. IP pools can only be split into a number of smaller pools that is a power
-  of 2.
-
-Examples:
-  # Split the IP pool specified by 172.0.0.0/8 into 2 smaller pools
-  ipam split --cidr=172.0.0.0/8 2
-```
-
-### Prerequisites
-
-To split an IP pool, you will first need to lock the Calico datastore
-so that no IPAM data can change during the split. This is accomplished by using the
-[`calicoctl datastore migrate lock` command](../datastore/migrate/lock.mdx).
-To continue normal IPAM operation, you will need to unlock the Calico datastore
-after the split with the
-[`calicoctl datastore migrate unlock` command](../datastore/migrate/unlock.mdx).
-
-### Examples
-
-Lock the Calico datastore.
-
-```bash
-calicoctl datastore migrate lock
-```
-
-Split the IP pool specified by 172.0.0.0/15 into 2 smaller pools.
-
-```bash
-calicoctl ipam split --cidr=172.0.0.0/15 2
-```
-
-This should create 2 IP pools, one covering CIDR `172.0.0.0/16`
-and one covering CIDR `172.1.0.0/16`.
-
-Unlock the Calico datastore to restore normal IPAM operation.
-
-```bash
-calicoctl datastore migrate unlock
-```
-
-### General options
-
-```
-  -c --config=<CONFIG>       Path to the file containing connection configuration in
-                             YAML or JSON format.
-                             [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Install calicoctl](../../../operations/calicoctl/install.mdx)
-- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format
-  and schema
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/label.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/label.mdx
deleted file mode 100644
index 879deb10f1..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/label.mdx
+++ /dev/null
@@ -1,154 +0,0 @@
----
-description: Command to change labels for workload endpoints or nodes.
----
-
-# calicoctl label
-
-This section describes the `calicoctl label` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx)
-for a full list of calicoctl commands.
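-
-One common use for node labels set with this command is steering IP pool
-assignment through an IP pool's node selector. A minimal sketch (the pool name
-and CIDR are illustrative):
-
-```bash
-# Create a pool that only allocates addresses on nodes labeled cluster=frontend
-calicoctl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: frontend-pool-example
-spec:
-  cidr: 10.10.0.0/24
-  nodeSelector: cluster == 'frontend'
-EOF
-```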
-
-:::note
-
-The available actions for a specific resource type may be
-limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API).
-Please refer to the
-[Resources section](../resources/overview.mdx)
-for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl label' command
-
-Run `calicoctl label --help` to display the following help menu for the
-command.
-
-```
-Usage:
-  calicoctl label (<KIND> <NAME>
-                  ( <key>=<value> [--overwrite] |
-                    <key> --remove )
-                  [--config=<CONFIG>] [--namespace=<NS>])
-
-Examples:
-  # Label a workload endpoint
-  calicoctl label workloadendpoints nginx --namespace=default app=web
-
-  # Label a node and overwrite the original value of key 'cluster'
-  calicoctl label nodes node1 cluster=frontend --overwrite
-
-  # Remove the label with key 'cluster' from the node
-  calicoctl label nodes node1 cluster --remove
-
-Options:
-  -h --help                    Show this screen.
-  -c --config=<CONFIG>         Path to the file containing connection
-                               configuration in YAML or JSON format.
-                               [default: /etc/calico/calicoctl.cfg]
-  -n --namespace=<NS>          Namespace of the resource.
-                               Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint.
-                               Uses the default namespace if not specified.
-  --overwrite                  If true, overwrite the value when the key is already
-                               present in labels. Otherwise an error is reported when the
-                               labeled resource already has the key in its labels.
-                               Cannot be used with --remove.
-  --remove                     If true, remove the specified key in labels of the
-                               resource. An error is reported when the specified key does
-                               not exist. Cannot be used with --overwrite.
-  --context=<context>          The name of the kubeconfig context to use.
-
-Description:
-  The label command is used to add or update a label on a resource. Resource types
-  that can be labeled are:
-
-    * bgpConfiguration
-    * bgpPeer
-    * felixConfiguration
-    * globalNetworkPolicy
-    * globalNetworkSet
-    * hostEndpoint
-    * ipPool
-    * networkPolicy
-    * networkSet
-    * node
-    * profile
-    * workloadEndpoint
-
-  The resource type is case-insensitive and may be pluralized.
-
-  Attempting to label resources that do not exist results in an error.
-
-  Attempting to remove a label that does not exist in the resource also results in an error.
-
-  When labeling a resource on an existing key:
-  - an error is reported if the --overwrite option is not provided.
-  - the value of the key is updated to the specified value if the --overwrite option is provided.
-```
-
-### Examples
-
-1. Label a node.
-
-   ```bash
-   calicoctl label nodes node1 cluster=backend
-   ```
-
-   Results indicate that the label was successfully applied.
-
-   ```
-   Successfully set label cluster on nodes node1
-   ```
-
-1. Label a node and overwrite the original value of key `cluster`.
-
-   ```bash
-   calicoctl label nodes node1 cluster=frontend --overwrite
-   ```
-
-   Results indicate that the label was successfully overwritten.
-
-   ```
-   Successfully updated label cluster on nodes node1
-   ```
-
-1. Remove the label with key `cluster` from the node.
-
-   ```bash
-   calicoctl label nodes node1 cluster --remove
-   ```
-
-   Results indicate that the label was successfully removed.
-
-   ```
-   Successfully removed label cluster from nodes node1.
-   ```
-
-### Options
-
-```
-  -n --namespace=<NS>          Namespace of the resource.
-                               Only applicable to NetworkPolicy and WorkloadEndpoint.
-                               Uses the default namespace if not specified.
-  --overwrite                  If true, overwrite the value when the key is already
-                               present in labels. Otherwise an error is reported when the
-                               labeled resource already has the key in its labels.
-                               Cannot be used with --remove.
- --remove If true, remove the specified key in labels of the - resource. Reports error when specified key does not - exist. Can not be used with --overwrite. -``` - -### General options - -``` - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../operations/calicoctl/install.mdx) -- [Resources](../resources/overview.mdx) for details on all valid resources, including file format - and schema diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/node/checksystem.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/node/checksystem.mdx deleted file mode 100644 index a005110637..0000000000 --- a/calico_versioned_docs/version-3.25/reference/calicoctl/node/checksystem.mdx +++ /dev/null @@ -1,66 +0,0 @@ ---- -description: Command to check compatibility of host to run a Calico node instance. ---- - -# calicoctl node checksystem - -This section describes the `calicoctl node checksystem` command. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl node checksystem' command - -Run `calicoctl node checksystem --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl node checksystem [--kernel-config=] - -Options: - -h --help Show this screen. - -f --kernel-config= Override the Kernel config file location. - Expected format is plain text. - default search locations: - "/usr/src/linux/.config", - "/boot/config-kernelVersion, - "/usr/src/linux-kernelVersion/.config", - "/usr/src/linux-headers-kernelVersion/.config", - "/lib/modules/kernelVersion/build/.config" - -Description: - Check the compatibility of this compute host to run a Calico node instance. -``` - -### Procedure - -These are the steps that `calicoctl` takes to pinpoint what modules are available in your system. - -1. `calicoctl` checks the kernel version. -2. By executing `lsmod` it tries to find out what modules are enabled. -3. Modules without a match in step 2 will be checked against `/lib/modules//modules.dep` file. -4. Modules without a match in step 2 & 3 will be checked against `/lib/modules//modules.builtin` file. -5. Modules without a match in previous steps will be tested against `kernelconfig` file `/usr/src/linux/.config`. -6. Any remaining module will be tested against loaded iptables modules in `/proc/net/ip_tables_matches`. - -### Examples - -```bash -calicoctl node checksystem -``` - -An example response follows. - -``` -xt_conntrack OK -xt_u32 OK -WARNING: Unable to detect the xt_set module. Load with `modprobe xt_set` -WARNING: Unable to detect the ipip module. Load with `modprobe ipip` -``` - -It is possible to override the `kernel-config` file using `--kernel-config` argument. In this case `calicoctl` will try to resolve the modules against the provided file and skip the default locations. - -```bash -calicoctl node checksystem --kernel-config /root/MYKERNELFILE -``` diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/node/diags.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/node/diags.mdx deleted file mode 100644 index 86f87a07b8..0000000000 --- a/calico_versioned_docs/version-3.25/reference/calicoctl/node/diags.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -description: Command to get diagnostics from a Calico node. ---- - -# calicoctl node diags - -This section describes the `calicoctl node diags` command. 
- -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl node diags' command - -Run `calicoctl node diags --help` to display the following help menu for the -command. - -``` -Usage: - calicoctl node diags [--log-dir=] - -Options: - -h --help Show this screen. - --log-dir= The directory containing Calico logs - [default: /var/log/calico] - -Description: - This command is used to gather diagnostic information from a Calico node. - This is usually used when trying to diagnose an issue that may be related to - your Calico network. - - The output of the command explains how to automatically upload the - diagnostics to http://transfer.sh for easy sharing of the data. Note that the - uploaded files will be deleted after 14 days. - - This command must be run on the specific Calico node that you are gathering - diagnostics for. -``` - -### Examples - -```bash -sudo calicoctl node diags -``` - -An example response follows. - -``` -Collecting diagnostics -Using temp dir: /tmp/calico676127473 -Dumping netstat -Dumping routes (IPv4) -Dumping routes (IPv6) -Dumping interface info (IPv4) -Dumping interface info (IPv6) -Dumping iptables (IPv4) -Dumping iptables (IPv6) -Dumping ipsets -exit status 1 -Dumping ipsets (container) -Copying journal for calico-node.service -Dumping felix stats -Copying Calico logs - -Diags saved to /tmp/calico676127473/diags-20170522_151219.tar.gz -If required, you can upload the diagnostics bundle to a file sharing service -such as transfer.sh using curl or similar. For example: - - curl --upload-file /tmp/calico676127473/diags-20170522_151219.tar.gz https://transfer.sh//tmp/calico676127473/diags-20170522_151219.tar.gz -``` - -### Options - -``` - --log-dir= The directory containing Calico logs. - [default: /var/log/calico] -``` diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/node/index.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/node/index.mdx deleted file mode 100644 index 625828c02d..0000000000 --- a/calico_versioned_docs/version-3.25/reference/calicoctl/node/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: calicoctl node commands. -hide_table_of_contents: true ---- - -# node - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/node/overview.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/node/overview.mdx deleted file mode 100644 index 57880633f6..0000000000 --- a/calico_versioned_docs/version-3.25/reference/calicoctl/node/overview.mdx +++ /dev/null @@ -1,47 +0,0 @@ ---- -description: Commands for calicoctl node. ---- - -# calicoctl node - -This section describes the `calicoctl node` commands. - -Read the [calicoctl Overview](../overview.mdx) -for a full list of calicoctl commands. - -Note that if you run `calicoctl` in a container, `calicoctl node ...` commands will -not work (they need access to parts of the host filesystem). - -## Displaying the help text for 'calicoctl node' commands - -Run `calicoctl node --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl node [...] - - run Run the Calico node container image - status View the current status of a Calico node. - diags Gather a diagnostics bundle for a Calico node. - checksystem Verify the compute host is able to run a Calico node instance. - -Options: - -h --help Show this screen. 
-
-Description:
-  Node specific commands for calicoctl. These commands must be run directly on
-  the compute host running the Calico node instance.
-
-  See 'calicoctl node <command> --help' to read about a specific subcommand.
-```
-
-## Node specific commands
-
-Details on the `calicoctl node` commands are described in the documents linked below,
-organized by sub command.
-
-- [calicoctl node run](run.mdx)
-- [calicoctl node status](status.mdx)
-- [calicoctl node diags](diags.mdx)
-- [calicoctl node checksystem](checksystem.mdx)
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/node/run.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/node/run.mdx
deleted file mode 100644
index 3d2f9a60a6..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/node/run.mdx
+++ /dev/null
@@ -1,375 +0,0 @@
----
-description: Command and options for running a Calico node.
----
-
-# calicoctl node run
-
-This section describes the `calicoctl node run` command.
-
-Read the [calicoctl Overview](../overview.mdx)
-for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl node run' command
-
-Run `calicoctl node run --help` to display the following help menu for the
-command.
-
-```
-Usage:
-  calicoctl node run [--ip=<IP>] [--ip6=<IP6>] [--as=<AS_NUM>]
-                     [--name=<NAME>]
-                     [--ip-autodetection-method=<IP_AUTODETECTION_METHOD>]
-                     [--ip6-autodetection-method=<IP6_AUTODETECTION_METHOD>]
-                     [--log-dir=<LOG_DIR>]
-                     [--node-image=<DOCKER_IMAGE_NAME>]
-                     [--backend=(bird|none)]
-                     [--config=<CONFIG>]
-                     [--felix-config=<CONFIG>]
-                     [--no-default-ippools]
-                     [--dryrun]
-                     [--init-system]
-                     [--disable-docker-networking]
-                     [--docker-networking-ifprefix=<IFPREFIX>]
-                     [--use-docker-networking-container-labels]
-
-Options:
-  -h --help                Show this screen.
-  --name=<NAME>            The name of the Calico node. If this is not
-                           supplied it defaults to the host name.
-  --as=<AS_NUM>            Set the AS number for this node. If omitted, it
-                           will use the value configured on the node resource.
-                           If there is no configured value and --as option is
-                           omitted, the node will inherit the global AS number
-                           (see 'calicoctl config' for details).
-  --ip=<IP>                Set the local IPv4 routing address for this node.
-                           If omitted, it will use the value configured on the
-                           node resource. If there is no configured value
-                           and the --ip option is omitted, the node will
-                           attempt to autodetect an IP address to use. Use a
-                           value of 'autodetect' to always force autodetection
-                           of the IP each time the node starts.
-  --ip6=<IP6>              Set the local IPv6 routing address for this node.
-                           If omitted, it will use the value configured on the
-                           node resource. If there is no configured value
-                           and the --ip6 option is omitted, the node will not
-                           route IPv6. Use a value of 'autodetect' to force
-                           autodetection of the IP each time the node starts.
-  --ip-autodetection-method=<IP_AUTODETECTION_METHOD>
-                           Specify the autodetection method for detecting the
-                           local IPv4 routing address for this node. The valid
-                           options are:
-                           > first-found
-                             Use the first valid IP address on the first
-                             enumerated interface (common known exceptions are
-                             filtered out, e.g. the docker bridge). It is not
-                             recommended to use this if you have multiple
-                             external interfaces on your host.
-                           > can-reach=<IP OR DOMAIN NAME>
-                             Use the interface determined by your host routing
-                             tables that will be used to reach the supplied
-                             destination IP or domain name.
-                           > interface=<IFACE NAME REGEX LIST>
-                             Use the first valid IP address found on interfaces
-                             named as per the first matching supplied interface
-                             name regex. Regexes are separated by commas
-                             (e.g. eth.*,enp0s.*).
- > skip-interface= - Use the first valid IP address on the first - enumerated interface (same logic as first-found - above) that does NOT match with any of the - specified interface name regexes. Regexes are - separated by commas (e.g. eth.*,enp0s.*). - [default: first-found] - --ip6-autodetection-method= - Specify the autodetection method for detecting the - local IPv6 routing address for this node. See - ip-autodetection-method flag for valid options. - [default: first-found] - --log-dir= The directory containing Calico logs. - [default: /var/log/calico] - --node-image= - Docker image to use for Calico's per-node container. - [default: {{registry}}{{imageNames.calico/node}}:latest] - --backend=(bird|none) - Specify which networking backend to use. When set - to "none", Calico node runs in policy only mode. - [default: bird] - --dryrun Output the appropriate command, without starting the - container. - --init-system Run the appropriate command to use with an init - system. - --no-default-ippools Do not create default pools upon startup. - Default IP pools will be created if this is not set - and there are no pre-existing Calico IP pools. - --disable-docker-networking - Disable Docker networking. - --docker-networking-ifprefix= - Interface prefix to use for the network interface - within the Docker containers that have been networked - by the Calico driver. - [default: cali] - --use-docker-networking-container-labels - Extract the Calico-namespaced Docker container labels - (org.projectcalico.label.*) and apply them to the - container endpoints for use with Calico policy. - This option is only valid when using Calico Docker - networking, and when enabled traffic must be - explicitly allowed by configuring Calico policies. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - --felix-config= - Path to the file containing Felix - configuration in YAML or JSON format. - -Description: - This command is used to start a {{nodecontainer}} container instance which provides - Calico networking and network policy on your compute host. -``` - -### Kubernetes as the datastore - -When {{prodname}} is configured to use the Kubernetes API as the datastore, BGP routing is _currently_ -not supported. Many of the command line options related to BGP routing will -have no effect. These include: - -- `--ip`, `--ip6`, `--ip-autodetection-method`, `--ip6-autodetection-method` -- `--as` -- `--backend` - -### Examples - -Start the {{nodecontainer}} with a pre-configured IPv4 address for BGP. - -```bash -sudo calicoctl node run -``` - -An example response follows. - -``` -Running command to load modules: modprobe -a xt_set ip6_tables -Enabling IPv4 forwarding -Enabling IPv6 forwarding -Increasing conntrack limit -Running the following command: - -docker run --net=host --privileged --name={{noderunning}} -d --restart=always -e ETCD_SCHEME=http -e HOSTNAME=calico -e ETCD_AUTHORITY=127.0.0.1:2379 -e AS= -e NO_DEFAULT_POOLS= -e ETCD_ENDPOINTS= -e IP= -e IP6= -e CALICO_NETWORKING_BACKEND=bird -v /var/run/docker.sock:/var/run/docker.sock -v /var/run/calico:/var/run/calico -v /lib/modules:/lib/modules -v /var/log/calico:/var/log/calico -v /run/docker/plugins:/run/docker/plugins {{registry}}{{imageNames.calico/node}}:{{releaseTitle}} - -Waiting for etcd connection... -Using configured IPv4 address: 192.0.2.0 -No IPv6 address configured -Using global AS number -WARNING: Could not confirm that the provided IPv4 address is assigned to this host. 
-Calico node name: calico
-Calico node started successfully
-```
-
-#### IP Autodetection method examples
-
-The node resource includes IPv4 and IPv6 routing IP addresses that should
-match those on one of the host interfaces. These IP addresses may be
-configured in advance by configuring the node resource prior to starting the
-`{{nodecontainer}}` service; alternatively, the addresses may be explicitly
-specified or autodetected through options on the `calicoctl run` command.
-
-There are different autodetection methods available and you should use the one
-best suited to your deployment. If you are able to explicitly specify the IP
-addresses, that is always preferred over autodetection. This section describes
-the available methods for autodetecting the host's IP addresses.
-
-An IPv4 address is always required, and so if no address was previously
-configured in the node resource, and no address was specified on the CLI, then
-we will attempt to autodetect an IPv4 address. An IPv6 address, however, will
-only be autodetected when explicitly requested.
-
-To force autodetection of an IPv4 address, use the option `--ip=autodetect`. To
-force autodetection of an IPv6 address, use the option `--ip6=autodetect`.
-
-To set the autodetection method for IPv4, use the `--ip-autodetection-method` option.
-To set the autodetection method for IPv6, use the `--ip6-autodetection-method` option.
-
-:::note
-
-If you are starting the `{{nodecontainer}}` container directly (and not using the
-`calicoctl run` helper command), the options are passed in environment
-variables. These are described in
-[Configuring `{{nodecontainer}}`](../../configure-calico-node.mdx).
-
-:::
-
-**first-found**
-
-The `first-found` option enumerates all interface IP addresses and returns the
-first valid IP address (based on IP version and type of address) on
-the first valid interface. Certain known "local" interfaces
-are omitted, such as the docker bridge. The order that both the interfaces
-and the IP addresses are listed is system dependent.
-
-This is the default detection method. However, since this method only makes a
-very simplified guess, it is recommended to either configure the node with a
-specific IP address, or to use one of the other detection methods.
-
-An example with the first-found autodetection method explicitly specified follows.
-
-```bash
-sudo calicoctl node run --ip autodetect --ip-autodetection-method first-found
-```
-
-**can-reach=DESTINATION**
-
-The `can-reach` method uses your local routing to determine which IP address
-will be used to reach the supplied destination. Both IP addresses and domain
-names may be used.
-
-An example with IP detection using a can-reach IP address:
-
-```bash
-sudo calicoctl node run --ip autodetect --ip-autodetection-method can-reach=8.8.8.8
-```
-
-An example with IP detection using a can-reach domain name:
-
-```bash
-sudo calicoctl node run --ip autodetect --ip-autodetection-method can-reach=www.google.com
-```
-
-**interface=INTERFACE-REGEX,INTERFACE-REGEX,...**
-
-The `interface` method uses the supplied interface regular expressions (golang
-syntax) to enumerate matching interfaces and to return the first IP address on
-the first interface that matches any of the interface regexes provided. The
-order that both the interfaces and the IP addresses are listed is system
-dependent.
-
-Example with IP detection on interface eth0:
-
-```bash
-sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth0
-```
-
-Example with IP detection on interfaces eth0, eth1, eth2 etc.:
-
-```bash
-sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth.*
-```
-
-An example with IP detection on interfaces eth0, eth1, eth2 etc. and wlp2s0:
-
-```bash
-sudo calicoctl node run --ip autodetect --ip-autodetection-method interface=eth.*,wlp2s0
-```
-
-**skip-interface=INTERFACE-REGEX,INTERFACE-REGEX,...**
-
-The `skip-interface` method uses the supplied interface regular expressions (golang
-syntax) to enumerate all interface IP addresses and returns the first valid IP address
-(based on IP version and type of address) that does not match the listed regular
-expressions. Like the `first-found` option, it also skips by default certain known
-"local" interfaces such as the docker bridge. The order that both the interfaces
-and the IP addresses are listed is system dependent.
-
-This method can take multiple regular expressions separated by `,`.
-Specifying a single regular expression also works; no terminating `,`
-character needs to be included in that case.
-
-### Options
-
-```
-  --name=<NAME>            The name of the Calico node. If this is not
-                           supplied it defaults to the host name.
-  --as=<AS_NUM>            Set the AS number for this node. If omitted, it
-                           will use the value configured on the node resource.
-                           If there is no configured value and --as option is
-                           omitted, the node will inherit the global AS number
-                           (see 'calicoctl config' for details).
-  --ip=<IP>                Set the local IPv4 routing address for this node.
-                           If omitted, it will use the value configured on the
-                           node resource. If there is no configured value
-                           and the --ip option is omitted, the node will
-                           attempt to autodetect an IP address to use. Use a
-                           value of 'autodetect' to always force autodetection
-                           of the IP each time the node starts.
-  --ip6=<IP6>              Set the local IPv6 routing address for this node.
-                           If omitted, it will use the value configured on the
-                           node resource. If there is no configured value
-                           and the --ip6 option is omitted, the node will not
-                           route IPv6. Use a value of 'autodetect' to force
-                           autodetection of the IP each time the node starts.
-  --ip-autodetection-method=<IP_AUTODETECTION_METHOD>
-                           Specify the autodetection method for detecting the
-                           local IPv4 routing address for this node. The valid
-                           options are:
-                           > first-found
-                             Use the first valid IP address on the first
-                             enumerated interface (common known exceptions are
-                             filtered out, e.g. the docker bridge). It is not
-                             recommended to use this if you have multiple
-                             external interfaces on your host.
-                           > can-reach=<IP OR DOMAIN NAME>
-                             Use the interface determined by your host routing
-                             tables that will be used to reach the supplied
-                             destination IP or domain name.
-                           > interface=<IFACE NAME REGEX LIST>
-                             Use the first valid IP address found on interfaces
-                             named as per the first matching supplied interface
-                             name regex. Regexes are separated by commas
-                             (e.g. eth.*,enp0s.*).
-                           > skip-interface=<IFACE NAME REGEX LIST>
-                             Use the first valid IP address on the first
-                             enumerated interface (same logic as first-found
-                             above) that does NOT match with any of the
-                             specified interface name regexes. Regexes are
-                             separated by commas (e.g. eth.*,enp0s.*).
-                           [default: first-found]
-  --ip6-autodetection-method=<IP6_AUTODETECTION_METHOD>
-                           Specify the autodetection method for detecting the
-                           local IPv6 routing address for this node. See
-                           ip-autodetection-method flag for valid options.
-                           [default: first-found]
-  --log-dir=<LOG_DIR>      The directory containing Calico logs.
-                           [default: /var/log/calico]
-  --node-image=<DOCKER_IMAGE_NAME>
-                           Docker image to use for Calico's per-node container.
-                           [default: {{registry}}{{imageNames.calico/node}}:latest]
-  --backend=(bird|none)
-                           Specify which networking backend to use. When set
-                           to "none", Calico node runs in policy only mode.
-                           [default: bird]
-  --dryrun                 Output the appropriate command, without starting the
-                           container.
-  --init-system            Run the appropriate command to use with an init
-                           system.
-  --no-default-ippools     Do not create default pools upon startup.
-                           Default IP pools will be created if this is not set
-                           and there are no pre-existing Calico IP pools.
-  --disable-docker-networking
-                           Disable Docker networking.
-  --docker-networking-ifprefix=<IFPREFIX>
-                           Interface prefix to use for the network interface
-                           within the Docker containers that have been networked
-                           by the Calico driver.
-                           [default: cali]
-  --use-docker-networking-container-labels
-                           Extract the Calico-namespaced Docker container labels
-                           (org.projectcalico.label.*) and apply them to the
-                           container endpoints for use with Calico policy.
-                           This option is only valid when using Calico Docker
-                           networking, and when enabled traffic must be
-                           explicitly allowed by configuring Calico policies.
-```
-
-### General options
-
-```
--c --config=<CONFIG>       Path to the file containing connection
-                           configuration in YAML or JSON format.
-                           [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../../operations/calicoctl/install.mdx)
-- [Resources](../../resources/overview.mdx) for details on all valid resources, including file format
-  and schema
-- [Policy](../../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/node/status.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/node/status.mdx
deleted file mode 100644
index 828811d3c7..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/node/status.mdx
+++ /dev/null
@@ -1,51 +0,0 @@
----
-description: Command to check status of a Calico node instance.
----
-
-# calicoctl node status
-
-This section describes the `calicoctl node status` command.
-
-Read the [calicoctl Overview](../overview.mdx)
-for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl node status' command
-
-Run `calicoctl node status --help` to display the following help menu for the
-command.
-
-```
-Usage:
-  calicoctl node status
-
-Options:
-  -h --help                 Show this screen.
-
-Description:
-  Check the status of the Calico node instance. This includes the status and
-  uptime of the node instance, and BGP peering states.
-```
-
-### Examples
-
-Check the status of a {{prodname}} instance.
-
-```bash
-sudo calicoctl node status
-```
-
-Sample results follow.
-
-```
-Calico process is running.
-
-IPv4 BGP status
-+--------------+-------------------+-------+----------+-------------+
-| PEER ADDRESS |     PEER TYPE     | STATE |  SINCE   |    INFO     |
-+--------------+-------------------+-------+----------+-------------+
-| 172.17.8.102 | node-to-node mesh | up    | 23:30:04 | Established |
-+--------------+-------------------+-------+----------+-------------+
-
-IPv6 BGP status
-No IPv6 peers found.
-```
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/overview.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/overview.mdx
deleted file mode 100644
index fb50b4afb0..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/overview.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
----
-description: The command line interface (CLI) tool to manage Calico network and security policy.
----
-
-# calicoctl user reference
-
-The command line tool, `calicoctl`, makes it easy to manage {{prodname}}
-network and security policy, as well as other {{prodname}} configurations.
-
-The full list of resources that can be managed, including a description of each,
-is described in the [Resource definitions](../resources/overview.mdx)
-section.
-
-:::note
-
-This section provides full reference information for `calicoctl`. To learn
-how to install and configure `calicoctl`, refer to
-[Installing calicoctl](../../operations/calicoctl/install.mdx).
-
-:::
-
-The calicoctl command line interface provides a number of resource management
-commands to allow you to create, modify, delete, and view the different
-{{prodname}} resources. This section is a command line reference for
-`calicoctl`, organized based on the command hierarchy.
-
-## Top level help
-
-Run `calicoctl --help` to display the following help menu for the top level
-calicoctl commands.
-
-```
-Usage:
-  calicoctl [options] <command> [<args>...]
-
-    create       Create a resource by file, directory or stdin.
-    replace      Replace a resource by file, directory or stdin.
-    apply        Apply a resource by file, directory or stdin. This creates a resource
-                 if it does not exist, and replaces a resource if it does exist.
-    patch        Patch a pre-existing resource in place.
-    delete       Delete a resource identified by file, directory, stdin or resource type and
-                 name.
-    get          Get a resource identified by file, directory, stdin or resource type and
-                 name.
-    label        Add or update labels of resources.
-    convert      Convert config files between different API versions.
-    ipam         IP address management.
-    node         Calico node management.
-    version      Display the version of calicoctl.
-
-Options:
-  -h --help                    Show this screen.
-  -l --log-level=<level>       Set the log level (one of panic, fatal, error,
-                               warn, info, debug) [default: panic]
-  --context=<context>          The name of the kubeconfig context to use.
-  --allow-version-mismatch     Allow client and cluster versions mismatch.
-
-Description:
-  The calicoctl command line tool is used to manage Calico network and security
-  policy, to view and manage endpoint configuration, and to manage a Calico
-  node instance.
-
-  See 'calicoctl <command> --help' to read about a specific subcommand.
-```
-
-:::note
-
-In a multi-cluster environment, if you have a [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file with multiple cluster contexts, you can change the context directly using the calicoctl `--context` argument.
-
-:::
-
-:::note
-
-The versions for Calico and calicoctl should be the same, and calls to calicoctl will fail if the versions do not match. If needed, this can be overridden by using the `--allow-version-mismatch` argument.
-
-:::
-
-## Top level command line options
-
-Details on the `calicoctl` commands are described in the documents linked below,
-organized by top level command.
-
-- [calicoctl create](create.mdx)
-- [calicoctl replace](replace.mdx)
-- [calicoctl apply](apply.mdx)
-- [calicoctl patch](patch.mdx)
-- [calicoctl delete](delete.mdx)
-- [calicoctl get](get.mdx)
-- [calicoctl label](label.mdx)
-- [calicoctl convert](convert.mdx)
-- [calicoctl ipam](ipam/overview.mdx)
-- [calicoctl node](node/index.mdx)
-- [calicoctl version](version.mdx)
-
-## Modifying low-level component configurations
-
-To update low-level Felix or BGP settings (`FelixConfiguration` and `BGPConfiguration` resource types):
-
-1. Get the appropriate resource and store the yaml output in a file using `calicoctl get <resource> -o yaml --export > config.yaml`.
-1. Modify the saved resource file.
-1. Update the resource using the `apply` or `replace` command: `calicoctl replace -f config.yaml`.
-
-See [Configuring Felix](../felix/configuration.mdx) for more details.
-
-## Supported resource definition aliases
-
-The following table lists supported aliases for {{prodname}} resources when using `calicoctl`. Note that all aliases
-are **case-insensitive**.
-
-| Resource definition                  | Supported calicoctl aliases                                                   |
-| :----------------------------------- | :---------------------------------------------------------------------------- |
-| BGP configuration                    | `bgpconfig`, `bgpconfigurations`, `bgpconfigs`                                |
-| BGP peer                             | `bgppeer`, `bgppeers`, `bgpp`, `bgpps`, `bp`, `bps`                           |
-| Felix configuration                  | `felixconfiguration`, `felixconfig`, `felixconfigurations`, `felixconfigs`    |
-| Global network policy                | `globalnetworkpolicy`, `globalnetworkpolicies`, `gnp`, `gnps`                 |
-| Global network set                   | `globalnetworkset`, `globalnetworksets`                                       |
-| Host endpoint                        | `hostendpoint`, `hostendpoints`, `hep`, `heps`                                |
-| IP pool                              | `ippool`, `ippools`, `ipp`, `ipps`, `pool`, `pools`                           |
-| IP reservation                       | `ipreservation`, `ipreservations`, `reservation`, `reservations`              |
-| Kubernetes controllers configuration | `kubecontrollersconfiguration`, `kubecontrollersconfig`                       |
-| Network policy                       | `networkpolicy`, `networkpolicies`, `policy`, `np`, `policies`, `pol`, `pols` |
-| Node                                 | `node`, `nodes`, `no`, `nos`                                                  |
-| Profiles                             | `profile`, `profiles`, `pro`, `pros`                                          |
-| Workload endpoint                    | `workloadendpoint`, `workloadendpoints`, `wep`, `weps`                        |
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/patch.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/patch.mdx
deleted file mode 100644
index e7bf8dbf1c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/patch.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
----
-description: Command to update a node with a patch.
----
-
-# calicoctl patch
-
-This section describes the `calicoctl patch` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx)
-for a full list of calicoctl commands.
-
-## Displaying the help text for 'calicoctl patch' command
-
-Run `calicoctl patch --help` to display the following help menu for the
-command.
-
-```
-Usage:
-  calicoctl patch <KIND> <NAME> --patch=<PATCH> [--type=<TYPE>] [--config=<CONFIG>] [--namespace=<NS>]
-
-Examples:
-  # Partially update a node using a strategic merge patch.
-  calicoctl patch node node-0 --patch '{"spec":{"bgp": {"routeReflectorClusterID": "CLUSTER_ID"}}}'
-
-  # Partially update a node using a json merge patch.
-  calicoctl patch node node-0 --patch '{"spec":{"bgp": {"routeReflectorClusterID": "CLUSTER_ID"}}}' --type json
-
-Options:
-  -h --help                    Show this screen.
-  -p --patch=<PATCH>           Spec to use to patch the resource.
-  -t --type=<TYPE>             Format of patch type:
-                                 strategic   Strategic merge patch (default)
-                                 json        JSON Patch, RFC 6902 (not yet implemented)
-                                 merge       JSON Merge Patch, RFC 7386 (not yet implemented)
-  -c --config=<CONFIG>         Path to the file containing connection
-                               configuration in YAML or JSON format.
-                               [default: /etc/calico/calicoctl.cfg]
-  -n --namespace=<NS>          Namespace of the resource.
-                               Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint.
-                               Uses the default namespace if not specified.
-  --context=<context>          The name of the kubeconfig context to use.
-
-Description:
-  The patch command is used to patch a specific resource by type and identifiers in place.
-  Currently, only JSON format is accepted.
-
-  Valid resource types are:
-
-    * bgpConfiguration
-    * bgpPeer
-    * felixConfiguration
-    * globalNetworkPolicy
-    * globalNetworkSet
-    * hostEndpoint
-    * ipPool
-    * networkPolicy
-    * networkSet
-    * node
-    * profile
-    * workloadEndpoint
-
-  The resource type is case-insensitive and may be pluralized.
-  Attempting to patch a resource that does not exist is treated as a
-  terminating error unless the --skip-not-exists flag is set. If this flag is
-  set, resources that do not exist are skipped.
-
-  When patching resources by type, only a single type may be specified at a
-  time. The name is required along with any other identifiers required to
-  uniquely identify a resource of the specified type.
-```
-
-### Examples
-
-1. Patch an IP Pool to enable outgoing NAT:
-
-   ```bash
-   calicoctl patch ippool ippool1 -p '{"spec":{"natOutgoing": true}}'
-   ```
-
-   Results indicate that a resource was successfully patched:
-
-   ```
-   Successfully patched 1 'ipPool' resource
-   ```
-
-### Options
-
-```
--p --patch=<PATCH>           Spec to use to patch the resource.
--t --type=<TYPE>             Format of patch type:
-                               strategic   Strategic merge patch (default)
-                               json        JSON Patch, RFC 6902 (not yet implemented)
-                               merge       JSON Merge Patch, RFC 7386 (not yet implemented)
--n --namespace=<NS>          Namespace of the resource.
-                             Only applicable to NetworkPolicy and WorkloadEndpoint.
-                             Uses the default namespace if not specified.
-```
-
-### General options
-
-```
--c --config=<CONFIG>         Path to the file containing connection
-                             configuration in YAML or JSON format.
-                             [default: /etc/calico/calicoctl.cfg]
-```
-
-## See also
-
-- [Installing calicoctl](../../operations/calicoctl/install.mdx)
-- [Resources](../resources/overview.mdx) for details on all valid resources, including file format
-  and schema
diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/replace.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/replace.mdx
deleted file mode 100644
index 22481929d6..0000000000
--- a/calico_versioned_docs/version-3.25/reference/calicoctl/replace.mdx
+++ /dev/null
@@ -1,133 +0,0 @@
----
-description: Command to replace an existing policy with a different one.
----
-
-# calicoctl replace
-
-This section describes the `calicoctl replace` command.
-
-Read the [calicoctl command line interface user reference](overview.mdx)
-for a full list of calicoctl commands.
-
-:::note
-
-The available actions for a specific resource type may be
-limited based on the datastore used for {{prodname}} (etcdv3 / Kubernetes API).
-Please refer to the
-[Resources section](../resources/overview.mdx)
-for details about each resource type.
-
-:::
-
-## Displaying the help text for 'calicoctl replace' command
-
-Run `calicoctl replace --help` to display the following help menu for the
-command.
- -``` -Usage: - calicoctl replace --filename= [--recursive] [--skip-empty] [--config=] [--namespace=] - -Examples: - # Replace a policy using the data in policy.yaml. - calicoctl replace -f ./policy.yaml - - # Replace a policy based on the JSON passed into stdin. - cat policy.json | calicoctl replace -f - - -Options: - -h --help Show this screen. - -f --filename= Filename to use to replace the resource. If set - to "-" loads from stdin. If filename is a directory, this command is - invoked for each .json .yaml and .yml file within that directory, - terminating after the first failure. - -R --recursive Process the filename specified in -f or --filename recursively. - --skip-empty Do not error if any files or directory specified using -f or --filename contain no - data. - -c --config= Path to the file containing connection - configuration in YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - -n --namespace= Namespace of the resource. - Only applicable to NetworkPolicy, NetworkSet, and WorkloadEndpoint. - Uses the default namespace if not specified. - --context= The name of the kubeconfig context to use. - -Description: - The replace command is used to replace a set of resources by filename or - stdin. JSON and YAML formats are accepted. - - Valid resource types are: - - * bgpConfiguration - * bgpPeer - * felixConfiguration - * globalNetworkPolicy - * hostEndpoint - * ipPool - * networkPolicy - * networkSet - * node - * profile - * workloadEndpoint - - Attempting to replace a resource that does not exist is treated as a - terminating error. - - The output of the command indicates how many resources were successfully - replaced, and the error reason if an error occurred. - - The resources are replaced in the order they are specified. In the event of - a failure replacing a specific resource it is possible to work out which - resource failed based on the number of resources successfully replaced. - - When replacing a resource, the complete resource spec must be provided, it is - not sufficient to supply only the fields that are being updated. -``` - -### Examples - -1. Replace a set of resources (of mixed type) using the data in resources.yaml. - - ```bash - calicoctl replace -f ./resources.yaml - ``` - - Results indicate that 8 resources were successfully replaced. - - ``` - Successfully replaced 8 resource(s) - ``` - -1. Replace a policy based on the JSON passed into stdin. - - ```bash - cat policy.json | calicoctl replace -f - - ``` - - Results indicate the policy does not exist. - - ``` - Failed to replace any 'policy' resources: resource does not exist: Policy(name=dbPolicy) - ``` - -### Options - -``` --f --filename= Filename to use to replace the resource. If set - to "-" loads from stdin. -``` - -### General options - -``` --c --config= Path to the file containing connection - configuration in YAML or JSON format. 
- [default: /etc/calico/calicoctl.cfg] -``` - -## See also - -- [Installing calicoctl](../../operations/calicoctl/install.mdx) -- [Resources](../resources/overview.mdx) for details on all valid resources, including file format - and schema -- [NetworkPolicy](../resources/networkpolicy.mdx) for details on the {{prodname}} selector-based policy model diff --git a/calico_versioned_docs/version-3.25/reference/calicoctl/version.mdx b/calico_versioned_docs/version-3.25/reference/calicoctl/version.mdx deleted file mode 100644 index a053d8a0c9..0000000000 --- a/calico_versioned_docs/version-3.25/reference/calicoctl/version.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: Command to display the calicoctl CLI version. ---- - -# calicoctl version - -import CalicoctlVersion from '@site/calico_versioned_docs/version-3.25/_includes/content/_calicoctl-version.mdx'; - -This section describes the `calicoctl version` command. - -Read the [calicoctl Overview](overview.mdx) -for a full list of calicoctl commands. - -## Displaying the help text for 'calicoctl version' commands - -Run `calicoctl version --help` to display the following help menu for the -commands. - -``` -Usage: - calicoctl version [--config=] [--poll=] - -Options: - -h --help Show this screen. - -c --config= Path to the file containing connection configuration in - YAML or JSON format. - [default: /etc/calico/calicoctl.cfg] - --poll= Poll for changes to the cluster information at a frequency specified using POLL duration - (e.g. 1s, 10m, 2h etc.). A value of 0 (the default) disables polling. - -Description: - Display the version of calicoctl. -``` - -### Example - -Use `calicoctl version` to obtain the following data. - - - -\* To obtain these values, you must configure `calicoctl` -[to connect to your datastore](../../operations/calicoctl/configure/overview.mdx). - -## See also - -- [Installing calicoctl](../../operations/calicoctl/install.mdx). diff --git a/calico_versioned_docs/version-3.25/reference/configure-calico-node.mdx b/calico_versioned_docs/version-3.25/reference/configure-calico-node.mdx deleted file mode 100644 index 488f70011a..0000000000 --- a/calico_versioned_docs/version-3.25/reference/configure-calico-node.mdx +++ /dev/null @@ -1,324 +0,0 @@ ---- -description: Customize calico/node using environment variables. ---- - -# Configuring calico/node - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -The `{{nodecontainer}}` container is deployed to every node (on Kubernetes, by a DaemonSet), and runs three internal daemons: - -- Felix, the Calico daemon that runs on every node and provides endpoints. -- BIRD, the BGP daemon that distributes routing information to other nodes. -- confd, a daemon that watches the Calico datastore for config changes and updates BIRD’s config files. - -For manifest-based installations, `{{nodecontainer}}` is primarily configured through environment -variables, typically set in the deployment manifest. Individual nodes may also be updated through the Node -custom resource. `{{nodecontainer}}` can also be configured through the Calico Operator. - -The rest of this page lists the available configuration options, and is followed by specific considerations for -various settings. - - - - -`{{nodecontainer}}` does not need to be configured directly when installed by the operator. For a complete operator -configuration reference, see [the installation API reference documentation][installation]. 
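-
-With an operator-managed cluster, the effective settings live on the
-Installation resource, which can be inspected directly. A minimal sketch
-(assumes the default resource name created by operator installs):
-
-```bash
-kubectl get installation default -o yaml
-```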
- - - - -## Environment variables - -### Configuring the default IP pool(s) - -Calico uses IP pools to configure how addresses are allocated to pods, and how networking works for certain -sets of addresses. You can see the full schema for IP pools here. - -`{{nodecontainer}}` can be configured to create a default IP pool for you, but only if none already -exist in the cluster. The following options control the parameters on the created pool. - -| Environment | Description | Schema | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | -| CALICO_IPV4POOL_CIDR | The IPv4 Pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. [Default: First not used in locally of (192.168.0.0/16, 172.16.0.0/16, .., 172.31.0.0/16) ] | IPv4 CIDR | -| CALICO_IPV4POOL_BLOCK_SIZE | Block size to use for the IPv4 Pool created at startup. Block size for IPv4 should be in the range 20-32 (inclusive) [Default: `26`] | int | -| CALICO_IPV4POOL_IPIP | IPIP Mode to use for the IPv4 Pool created at start up. If set to a value other than `Never`, `CALICO_IPV4POOL_VXLAN` should not be set. [Default: `Always`] | Always, CrossSubnet, Never ("Off" is also accepted as a synonym for "Never") | -| CALICO_IPV4POOL_VXLAN | VXLAN Mode to use for the IPv4 Pool created at start up. If set to a value other than `Never`, `CALICO_IPV4POOL_IPIP` should not be set. [Default: `Never`] | Always, CrossSubnet, Never | -| CALICO_IPV4POOL_NAT_OUTGOING | Controls NAT Outgoing for the IPv4 Pool created at start up. [Default: `true`] | boolean | -| CALICO_IPV4POOL_NODE_SELECTOR | Controls the NodeSelector for the IPv4 Pool created at start up. [Default: `all()`] | [selector](resources/ippool.mdx#node-selector) | -| CALICO_IPV6POOL_CIDR | The IPv6 Pool to create if none exists at start up. It is invalid to define this variable and NO_DEFAULT_POOLS. [Default: ``] | IPv6 CIDR | -| CALICO_IPV6POOL_BLOCK_SIZE | Block size to use for the IPv6 POOL created at startup. Block size for IPv6 should be in the range 116-128 (inclusive) [Default: `122`] | int | -| CALICO_IPV6POOL_VXLAN | VXLAN Mode to use for the IPv6 Pool created at start up. [Default: `Never`] | Always, CrossSubnet, Never | -| CALICO_IPV6POOL_NAT_OUTGOING | Controls NAT Outgoing for the IPv6 Pool created at start up. [Default: `false`] | boolean | -| CALICO_IPV6POOL_NODE_SELECTOR | Controls the NodeSelector for the IPv6 Pool created at start up. [Default: `all()`] | [selector](resources/ippool.mdx#node-selector) | -| CALICO_IPV4POOL_DISABLE_BGP_EXPORT | Disable exporting routes over BGP for the IPv4 Pool created at start up. [Default: `false`] | boolean | -| CALICO_IPV6POOL_DISABLE_BGP_EXPORT | Disable exporting routes over BGP for the IPv6 Pool created at start up. [Default: `false`] | boolean | -| NO_DEFAULT_POOLS | Prevents {{prodname}} from creating a default pool if one does not exist. [Default: `false`] | boolean | - -### Configuring BGP Networking - -BGP configuration for Calico nodes is normally configured through the [Node](resources/node.mdx), [BGPConfiguration](resources/bgpconfig.mdx), and [BGPPeer](resources/bgppeer.mdx) resources. -`{{nodecontainer}}` also exposes some options to allow setting certain fields on these objects, as described -below. 
-### Configuring BGP Networking
-
-BGP configuration for Calico nodes is normally configured through the [Node](resources/node.mdx), [BGPConfiguration](resources/bgpconfig.mdx), and [BGPPeer](resources/bgppeer.mdx) resources.
-`{{nodecontainer}}` also exposes some options to allow setting certain fields on these objects, as described
-below.
-
-| Environment | Description | Schema |
-| --- | --- | --- |
-| NODENAME | A unique identifier for this host. See [node name determination](#node-name-determination) for more details. | lowercase string |
-| IP | The IPv4 address to assign this host or detection behavior at startup. Refer to [IP setting](#ip-setting) for the details of the behavior possible with this field. | IPv4 |
-| IP6 | The IPv6 address to assign this host or detection behavior at startup. Refer to [IP setting](#ip-setting) for the details of the behavior possible with this field. | IPv6 |
-| IP_AUTODETECTION_METHOD | The method to use to autodetect the IPv4 address for this host. This is only used when the IPv4 address is being autodetected. See [IP Autodetection methods](#ip-autodetection-methods) for details of the valid methods. [Default: `first-found`] | string |
-| IP6_AUTODETECTION_METHOD | The method to use to autodetect the IPv6 address for this host. This is only used when the IPv6 address is being autodetected. See [IP Autodetection methods](#ip-autodetection-methods) for details of the valid methods. [Default: `first-found`] | string |
-| AS | The AS number for this node. When specified, the value is saved in the node resource configuration for this host, overriding any previously configured value. When omitted, if an AS number has been previously configured in the node resource, that AS number is used for the peering. When omitted, if an AS number has not yet been configured in the node resource, the node will use the global value (see [example modifying Global BGP settings](../networking/configuring/bgp.mdx) for details). | int |
-| CALICO_ROUTER_ID | Sets the `router id` to use for BGP if no IPv4 address is set on the node. For an IPv6-only system, this may be set to `hash`. It then uses the hash of the nodename to create a 4-byte router ID. See note below. [Default: ``] | string |
-| CALICO_K8S_NODE_REF | The name of the corresponding node object in the Kubernetes API. When set, used for correlating this node with events from the Kubernetes API. | string |
-
-### Configuring Datastore Access
-
-| Environment | Description | Schema |
-| --- | --- | --- |
-| DATASTORE_TYPE | Type of datastore. [Default: `kubernetes`] | kubernetes, etcdv3 |
-
-#### Configuring Kubernetes Datastore Access
-
-| Environment | Description | Schema |
-| --- | --- | --- |
-| KUBECONFIG | When using the Kubernetes datastore, the location of a kubeconfig file to use. | string |
-| K8S_API_ENDPOINT | Location of the Kubernetes API. Not required if using kubeconfig. | string |
-| K8S_CERT_FILE | Location of a client certificate for accessing the Kubernetes API. | string |
-| K8S_KEY_FILE | Location of a client key for accessing the Kubernetes API. | string |
-| K8S_CA_FILE | Location of a CA for accessing the Kubernetes API.
| string | - -:::note - -When {{prodname}} is configured to use the Kubernetes API as the datastore, the environments -used for BGP configuration are ignored—this includes selection of the node AS number (AS) -and all of the IP selection options (IP, IP6, IP_AUTODETECTION_METHOD, IP6_AUTODETECTION_METHOD). - -::: - -#### Configuring etcd Datastore Access - -| Environment | Description | Schema | -| ------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------ | -| ETCD_ENDPOINTS | A comma separated list of etcd endpoints [Example: `http://127.0.0.1:2379,http://127.0.0.2:2379`] (required) | string | -| ETCD_DISCOVERY_SRV | Domain name to discover etcd endpoints via SRV records. Mutually exclusive with `ETCD_ENDPOINTS`. [Example: `example.com`] (optional) | string | -| ETCD_KEY_FILE | Path to the file containing the private key matching the `{{nodecontainer}}` client certificate. Enables `{{nodecontainer}}` to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/node/key.pem` (optional) | string | -| ETCD_CERT_FILE | Path to the file containing the client certificate issued to `{{nodecontainer}}`. Enables `{{nodecontainer}}` to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/node/cert.pem` (optional) | string | -| ETCD_CA_CERT_FILE | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures `{{nodecontainer}}` to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing `{{nodecontainer}}` to trust each of the CAs included. Example: `/etc/node/ca.pem` | string | - -### Configuring Logging - -| Environment | Description | Schema | -| --------------------------- | -------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------- | -| CALICO_DISABLE_FILE_LOGGING | Disables logging to file. [Default: "false"] | string | -| CALICO_STARTUP_LOGLEVEL | The log severity above which startup `{{nodecontainer}}` logs are sent to the stdout. [Default: `ERROR`] | DEBUG, INFO, WARNING, ERROR, CRITICAL, or NONE (case-insensitive) | - -### Configuring CNI Plugin - -`{{nodecontainer}}` has a few options that are configurable based on the CNI plugin and CNI plugin -configuration used on the cluster. - -| Environment | Description | Schema | -| ----------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -| USE_POD_CIDR | Use the Kubernetes `Node.Spec.PodCIDR` field when using host-local IPAM. Requires Kubernetes API datastore. This field is required when using the Kubernetes API datastore with host-local IPAM. [Default: false] | boolean | -| CALICO_MANAGE_CNI | Tells Calico to update the kubeconfig file at /host/etc/cni/net.d/calico-kubeconfig on credentials change. 
[Default: true] | boolean | - -### Other Environment Variables - -| Environment | Description | Schema | -| ------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------- | -| DISABLE_NODE_IP_CHECK | Skips checks for duplicate Node IPs. This can reduce the load on the cluster when a large number of Nodes are restarting. [Default: `false`] | boolean | -| WAIT_FOR_DATASTORE | Wait for connection to datastore before starting. If a successful connection is not made, node will shutdown. [Default: `false`] | boolean | -| CALICO_NETWORKING_BACKEND | The networking backend to use. In `bird` mode, Calico will provide BGP networking using the BIRD BGP daemon; VXLAN networking can also be used. In `vxlan` mode, only VXLAN networking is provided; BIRD and BGP are disabled. If set to `none` (also known as policy-only mode), both BIRD and VXLAN are disabled. [Default: `bird`] | bird, vxlan, none | -| CLUSTER_TYPE | Contains comma delimited list of indicators about this cluster. e.g. k8s, mesos, kubeadm, canal, bgp | string | - -## Appendix - -### Node name determination - -The `{{nodecontainer}}` must know the name of the node on which it is running. The node name is used to -retrieve the [Node resource](resources/node.mdx) configured for this node if it exists, or to create a new node resource representing the node if it does not. It is -also used to associate the node with per-node [BGP configuration](resources/bgpconfig.mdx), [felix configuration](resources/felixconfig.mdx), and endpoints. - -When launched, the `{{nodecontainer}}` container sets the node name according to the following order of precedence: - -1. The value specified in the `NODENAME` environment variable, if set. -1. The value specified in `/var/lib/calico/nodename`, if it exists. -1. The value specified in the `HOSTNAME` environment variable, if set. -1. The hostname as returned by the operating system, converted to lowercase. - -Once the node has determined its name, the value will be cached in `/var/lib/calico/nodename` for future use. - -For example, if given the following conditions: - -- `NODENAME=""` -- `/var/lib/calico/nodename` does not exist -- `HOSTNAME="host-A"` -- The operating system returns "host-A.internal.myorg.com" for the hostname - -{{nodecontainer}} will use "host-a" for its name and will write the value in `/var/lib/calico/nodename`. If {{nodecontainer}} -is then restarted, it will use the cached value of "host-a" read from the file on disk. - -### IP setting - -The IP (for IPv4) and IP6 (for IPv6) environment variables are used to set, -force autodetection, or disable auto detection of the address for the -appropriate IP version for the node. When the environment variable is set, -the address is saved in the -[node resource configuration](resources/node.mdx) -for this host, overriding any previously configured value. - -calico/node will attempt to detect subnet information from the host, and augment the provided address -if possible. - -#### IP setting special case values - -There are several special case values that can be set in the IP(6) environment variables, they are: - -- Not set or empty string: Any previously set address on the node - resource will be used. 
If no previous address is set on the node resource - the two versions behave differently: - - IP will do autodetection of the IPv4 address and set it on the node - resource. - - IP6 will not do autodetection. -- `autodetect`: Autodetection will always be performed for the IP address and - the detected address will overwrite any value configured in the node - resource. -- `none`: Autodetection will not be performed (this is useful to disable IPv4). - -### IP autodetection methods - -When {{prodname}} is used for routing, each node must be configured with an IPv4 -address and/or an IPv6 address that will be used to route between -nodes. To eliminate node specific IP address configuration, the `{{nodecontainer}}` -container can be configured to autodetect these IP addresses. In many systems, -there might be multiple physical interfaces on a host, or possibly multiple IP -addresses configured on a physical interface. In these cases, there are -multiple addresses to choose from and so autodetection of the correct address -can be tricky. - -The IP autodetection methods are provided to improve the selection of the -correct address, by limiting the selection based on suitable criteria for your -deployment. - -The following sections describe the available IP autodetection methods. - -#### first-found - -The `first-found` option enumerates all interface IP addresses and returns the -first valid IP address (based on IP version and type of address) on -the first valid interface. Certain known "local" interfaces -are omitted, such as the docker bridge. The order that both the interfaces -and the IP addresses are listed is system dependent. - -This is the default detection method. However, since this method only makes a -very simplified guess, it is recommended to either configure the node with a -specific IP address, or to use one of the other detection methods. - -e.g. - -``` -IP_AUTODETECTION_METHOD=first-found -IP6_AUTODETECTION_METHOD=first-found -``` - -#### kubernetes-internal-ip - -The `kubernetes-internal-ip` method will select the first internal IP address listed in the Kubernetes node's `Status.Addresses` field - -Example: - -``` -IP_AUTODETECTION_METHOD=kubernetes-internal-ip -IP6_AUTODETECTION_METHOD=kubernetes-internal-ip -``` - -#### can-reach=DESTINATION - -The `can-reach` method uses your local routing to determine which IP address -will be used to reach the supplied destination. Both IP addresses and domain -names may be used. - -Example using IP addresses: - -``` -IP_AUTODETECTION_METHOD=can-reach=8.8.8.8 -IP6_AUTODETECTION_METHOD=can-reach=2001:4860:4860::8888 -``` - -Example using domain names: - -``` -IP_AUTODETECTION_METHOD=can-reach=www.google.com -IP6_AUTODETECTION_METHOD=can-reach=www.google.com -``` - -#### interface=INTERFACE-REGEX - -The `interface` method uses the supplied interface [regular expression](https://pkg.go.dev/regexp) -to enumerate matching interfaces and to return the first IP address on -the first matching interface. The order that both the interfaces -and the IP addresses are listed is system dependent. - -Example with valid IP address on interface eth0, eth1, eth2 etc.: - -``` -IP_AUTODETECTION_METHOD=interface=eth.* -IP6_AUTODETECTION_METHOD=interface=eth.* -``` - -#### skip-interface=INTERFACE-REGEX - -The `skip-interface` method uses the supplied interface [regular expression](https://pkg.go.dev/regexp) -to exclude interfaces and to return the first IP address on the first -interface that does not match. 
The order that both the interfaces
-and the IP addresses are listed is system dependent.
-
-Example excluding enp6s0f0 while matching eth0, eth1, eth2, etc.:
-
-```
-IP_AUTODETECTION_METHOD=skip-interface=enp6s0f0,eth.*
-IP6_AUTODETECTION_METHOD=skip-interface=enp6s0f0,eth.*
-```
-
-#### cidr=CIDR
-
-The `cidr` method will select any IP address from the node that falls within the given CIDRs. For example:
-
-```
-IP_AUTODETECTION_METHOD=cidr=10.0.1.0/24,10.0.2.0/24
-IP6_AUTODETECTION_METHOD=cidr=2001:4860::0/64
-```
-
-### Node readiness
-
-The `calico/node` container supports an exec readiness endpoint.
-
-To access this endpoint, use the following command.
-
-```bash
-docker exec calico-node /bin/calico-node [flag]
-```
-
-Substitute `[flag]` with one or more of the following.
-
-- `-bird-ready`
-- `-bird6-ready`
-- `-felix-ready`
-
-The BIRD readiness endpoint ensures that the BGP mesh is healthy by verifying that all BGP peers are established and
-no graceful restart is in progress. If the BIRD readiness check is failing due to unreachable peers that are no longer
-in the cluster, see [decommissioning a node](../operations/decommissioning-a-node.mdx).
-
-### Setting `CALICO_ROUTER_ID` for IPv6 only system
-
-Setting `CALICO_ROUTER_ID` to the value `hash` will use a hash of the configured nodename for the router ID. This should only be used in IPv6-only systems with no IPv4 address to use for the router ID. Since each node chooses its own router ID in isolation, it is possible for two nodes to pick the same ID, resulting in a clash. The probability of such a clash grows with cluster size, so this feature should not be used in a large cluster (500+ nodes).
-
- 
- 
-
-[installation]: installation/api.mdx
diff --git a/calico_versioned_docs/version-3.25/reference/configure-cni-plugins.mdx b/calico_versioned_docs/version-3.25/reference/configure-cni-plugins.mdx
deleted file mode 100644
index 228d99366c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/configure-cni-plugins.mdx
+++ /dev/null
@@ -1,594 +0,0 @@
----
-description: Details for configuring the Calico CNI plugins.
----
-
-# Configure the Calico CNI plugins
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-:::note
-
-The {{prodname}} CNI plugins do not need to be configured directly when installed by the operator.
-For a complete operator configuration reference, see [the installation API reference documentation][installation].
-
-:::
-
-The {{prodname}} CNI plugin is configured through the standard CNI
-[configuration mechanism](https://github.com/containernetworking/cni/blob/master/SPEC.md#network-configuration).
-
-A minimal configuration file that uses {{prodname}} for networking
-and IPAM looks like this:
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "ipam": {
-    "type": "calico-ipam"
-  }
-}
-```
-
-If the `{{nodecontainer}}` container on a node was registered with a `NODENAME` other than the node hostname, the CNI plugin on that node must be configured with the same `nodename`:
-
-```json
-{
-  "name": "any_name",
-  "nodename": "<NODENAME>",
-  "type": "calico",
-  "ipam": {
-    "type": "calico-ipam"
-  }
-}
-```
-
-Additional configuration can be added as detailed below.
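-On a running node, you can inspect the configuration the CNI plugin is actually using by reading the installed file;
-the exact file name varies by install method, with `10-calico.conflist` being common:
-
-```bash
-cat /etc/cni/net.d/10-calico.conflist
-```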
-## Generic
-
-### Datastore type
-
-The following option allows configuration of the {{prodname}} datastore type.
-
-- `datastore_type` (default: etcdv3)
-
-The {{prodname}} CNI plugin supports the following datastore types:
-
-- etcdv3 (default)
-- kubernetes
-
-### etcd location
-
-The following options are valid when `datastore_type` is `etcdv3`.
-
-Configure access to your etcd cluster using the following options.
-
-| Option name | Default | Description | Schema |
-| --- | --- | --- | --- |
-| `etcd_endpoints` | None | Comma-separated list of endpoints. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379` | string |
-| `etcd_discovery_srv` | None | Domain name to discover etcd endpoints via SRV records. Mutually exclusive with `etcd_endpoints`. Example: `example.com` (optional) | string |
-| `etcd_key_file` | None | Path to the file containing the private key matching the CNI plugin's client certificate. Enables the CNI plugin to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/calico-cni/key.pem` (optional) | string |
-| `etcd_cert_file` | None | Path to the file containing the client certificate issued to the CNI plugin. Enables the CNI plugin to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/calico-cni/cert.pem` (optional) | string |
-| `etcd_ca_cert_file` | None | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures the CNI plugin to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing the CNI plugin to trust each of the CAs included. | string |
-
-The following options are deprecated.
-
-- `etcd_authority` (default is `127.0.0.1:2379`)
-  - If `etcd_authority` is set at the same time as `etcd_endpoints` then `etcd_endpoints` is used.
-- `etcd_scheme` (default is `http`)
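-Putting these options together, a configuration for a TLS-secured etcd might look like the following sketch (the
-endpoints and file paths are illustrative):
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "etcd_endpoints": "https://10.0.0.1:2379,https://10.0.0.2:2379",
-  "etcd_ca_cert_file": "/etc/calico-cni/ca.pem",
-  "etcd_cert_file": "/etc/calico-cni/cert.pem",
-  "etcd_key_file": "/etc/calico-cni/key.pem",
-  "ipam": {
-    "type": "calico-ipam"
-  }
-}
-```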
-### Logging
-
-Logging is always to `stderr`. Logs are also written to `/var/log/calico/cni/cni.log` on each host by default.
-
-Logging can be configured using the following options in the netconf.
-
-| Option name | Default | Description |
-| --- | --- | --- |
-| `log_level` | INFO | Logging level. Allowed levels are `ERROR`, `WARNING`, `INFO`, and `DEBUG`. |
-| `log_file_path` | `/var/log/calico/cni/cni.log` | Location on each host to write CNI log files to. Logging to file can be disabled by removing this option. |
-| `log_file_max_size` | 100 | Max file size in MB log files can reach before they are rotated. |
-| `log_file_max_age` | 30 | Max age in days that old log files will be kept on the host before they are removed. |
-| `log_file_max_count` | 10 | Max number of rotated log files allowed on the host before they are cleaned up. |
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "log_level": "DEBUG",
-  "log_file_path": "/var/log/calico/cni/cni.log",
-  "ipam": {
-    "type": "calico-ipam"
-  }
-}
-```
-
-### IPAM
-
-When using {{prodname}} IPAM, the following flags determine what IP addresses should be assigned. NOTE: These flags are strings and not boolean values.
-
-- `assign_ipv4` (default: `"true"`)
-- `assign_ipv6` (default: `"false"`)
-
-A specific IP address can be chosen by using [`CNI_ARGS`](https://github.com/appc/cni/blob/master/SPEC.md#parameters) and setting `IP` to the desired value.
-
-By default, {{prodname}} IPAM will assign IP addresses from all the available IP pools.
-
-Optionally, the list of possible IPv4 and IPv6 pools can also be specified via the following properties:
-
-- `ipv4_pools`: An array of CIDR strings or pool names. (e.g., `"ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16", "default-ipv4-ippool"]`)
-- `ipv6_pools`: An array of CIDR strings or pool names. (e.g., `"ipv6_pools": ["2001:db8::1/120", "namedpool"]`)
-
-Example CNI config:
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "ipam": {
-    "type": "calico-ipam",
-    "assign_ipv4": "true",
-    "assign_ipv6": "true",
-    "ipv4_pools": ["10.0.0.0/24", "20.0.0.0/16", "default-ipv4-ippool"],
-    "ipv6_pools": ["2001:db8::1/120", "default-ipv6-ippool"]
-  }
-}
-```
-
-:::note
-
-`ipv6_pools` will be respected only when `assign_ipv6` is set to `"true"`.
-
-:::
-
-Any IP pools specified in the CNI config must have already been created. It is an error to specify IP pools in the config that do not exist.
-
-### Container settings
-
-The following options allow configuration of settings within the container namespace.
-
-- `allow_ip_forwarding` (default: `false`)
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "ipam": {
-    "type": "calico-ipam"
-  },
-  "container_settings": {
-    "allow_ip_forwarding": true
-  }
-}
-```
-
-### Readiness Gates
-
-The following option makes the CNI plugin wait for specified endpoint(s) to be ready before configuring pod networking.
-
-- `readiness_gates`
-
-This is an optional property that takes an array of URLs. Each URL specified will be polled for readiness, and pod networking will continue startup once all readiness gates are ready.
-
-Example CNI config:
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "ipam": {
-    "type": "calico-ipam"
-  },
-  "readiness_gates": ["http://localhost:9099/readiness", "http://localhost:8888/status"]
-}
-```
-
-## Kubernetes specific
-
-When using the {{prodname}} CNI plugin with Kubernetes, the plugin must be able to access the Kubernetes API server to find the labels assigned to the Kubernetes pods. The recommended way to configure access is through a `kubeconfig` file specified in the `kubernetes` section of the network config. e.g.
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "kubernetes": {
-    "kubeconfig": "/path/to/kubeconfig"
-  },
-  "ipam": {
-    "type": "calico-ipam"
-  }
-}
-```
-
-As a convenience, the API location can also be configured directly, e.g.
-
-```json
-{
-  "name": "any_name",
-  "cniVersion": "0.1.0",
-  "type": "calico",
-  "kubernetes": {
-    "k8s_api_root": "http://127.0.0.1:8080"
-  },
-  "ipam": {
-    "type": "calico-ipam"
-  }
-}
-```
-
-### Enabling Kubernetes policy
-
-If you wish to use the Kubernetes `NetworkPolicy` resource then you must set a policy type in the network config.
-There is a single supported policy type, `k8s`. When set,
-you must also run calico/kube-controllers with the policy, profile, and workloadendpoint controllers enabled.
- -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "/path/to/kubeconfig" - }, - "ipam": { - "type": "calico-ipam" - } -} -``` - -When using `type: k8s`, the {{prodname}} CNI plugin requires read-only Kubernetes API access to the `Pods` resource in all namespaces. - -## IPAM - -### Using host-local IPAM - -Calico can be configured to use [host-local IPAM](https://www.cni.dev/plugins/current/ipam/host-local/) instead of the default `calico-ipam`. Host -local IPAM uses a pre-determined CIDR per-host, and stores allocations locally on each node. This is in contrast to Calico IPAM, which dynamically -allocates blocks of addresses and single addresses alike in response to cluster needs. - -Host local IPAM is generally only used on clusters where integration with the Kubernetes [route controller](https://kubernetes.io/docs/concepts/architecture/cloud-controller/#route-controller) is necessary. -Note that some Calico features - such as the ability to request a specific address or pool for a pod - require Calico IPAM to function, and will not work with host-local IPAM enabled. - - - - -The `host-local` IPAM plugin can be configured by setting the `Spec.CNI.IPAM.Plugin` field to `HostLocal` on the [operator.tigera.io/Installation](installation/api.mdx#operator.tigera.io/v1.Installation) API. - -Calico will use the `host-local` IPAM plugin to allocate IPv4 addresses from the node's IPv4 pod CIDR if there is an IPv4 pool configured in `Spec.IPPools`, and an IPv6 address from the node's IPv6 pod CIDR if -there is an IPv6 pool configured in `Spec.IPPools`. - -The following example configures Calico to assign dual-stack IPs to pods using the host-local IPAM plugin. - -```yaml -kind: Installation -apiVersion: operator.tigera.io/v1 -metadata: - name: default -spec: - calicoNetwork: - ipPools: - - cidr: 192.168.0.0/16 - - cidr: 2001:db8::/64 - cni: - type: Calico - ipam: - type: HostLocal -``` - - - - -When using the CNI `host-local` IPAM plugin, two special values - `usePodCidr` and `usePodCidrIPv6` - are allowed for the subnet field (either at the top-level, or in a "range"). This tells the plugin to determine the subnet to use from the Kubernetes API based on the Node.podCIDR field. {{prodname}} does not use the `gateway` field of a range so that field is not required and it will be ignored if present. - -:::note - -`usePodCidr` and `usePodCidrIPv6` can only be used as the value of the `subnet` field, it cannot be used in -`rangeStart` or `rangeEnd` so those values are not useful if `subnet` is set to `usePodCidr`. - -::: - -{{prodname}} supports the host-local IPAM plugin's `routes` field as follows: - -- If there is no `routes` field, {{prodname}} will install a default `0.0.0.0/0`, and/or `::/0` route into the pod (depending on whether the pod has an IPv4 and/or IPv6 address). -- If there is a `routes` field then {{prodname}} will program _only_ the routes in the routes field into the pod. Since {{prodname}} implements a point-to-point link into the pod, the `gw` field is not required and it will be ignored if present. All routes that {{prodname}} installs will have {{prodname}}'s link-local IP as the next hop. 
- -{{prodname}} CNI plugin configuration: - -- `node_name` - - The node name to use when looking up the CIDR value (defaults to current hostname) - -```json -{ - "name": "any_name", - "cniVersion": "0.1.0", - "type": "calico", - "kubernetes": { - "kubeconfig": "/path/to/kubeconfig", - "node_name": "node-name-in-k8s" - }, - "ipam": { - "type": "host-local", - "ranges": [[{ "subnet": "usePodCidr" }], [{ "subnet": "usePodCidrIPv6" }]], - "routes": [{ "dst": "0.0.0.0/0" }, { "dst": "2001:db8::/96" }] - } -} -``` - -When making use of the `usePodCidr` or `usePodCidrIPv6` options, the {{prodname}} CNI plugin requires read-only Kubernetes API access to the `Nodes` resource. - -#### Configuring node and typha - -When using `host-local` IPAM with the Kubernetes API datastore, you must configure both {{nodecontainer}} and the Typha deployment to use the `Node.podCIDR` field by setting the environment variable `USE_POD_CIDR=true` in each. - - - - -### Using Kubernetes annotations - -#### Specifying IP pools on a per-namespace or per-pod basis - -In addition to specifying IP pools in the CNI config as discussed above, {{prodname}} IPAM supports specifying IP pools per-namespace or per-pod using the following [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). - -- `cni.projectcalico.org/ipv4pools`: A list of configured IPv4 Pools from which to choose an address for the pod. - - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipv4pools': '["default-ipv4-ippool"]' - ``` - -- `cni.projectcalico.org/ipv6pools`: A list of configured IPv6 Pools from which to choose an address for the pod. - - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipv6pools': '["2001:db8::1/120"]' - ``` - -If provided, these IP pools will override any IP pools specified in the CNI config. - -:::note - -This requires the IP pools to exist before `ipv4pools` or -`ipv6pools` annotations are used. Requesting a subset of an IP pool -is not supported. IP pools requested in the annotations must exactly -match a configured [IPPool](resources/ippool.mdx) resource. - -::: - -:::note - -The {{prodname}} CNI plugin supports specifying an annotation per namespace. -If both the namespace and the pod have this annotation, the pod information will be used. -Otherwise, if only the namespace has the annotation the annotation of the namespace will -be used for each pod in it. - -::: - -#### Requesting a specific IP address - -You can also request a specific IP address through [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) with {{prodname}} IPAM. -There are two annotations to request a specific IP address: - -- `cni.projectcalico.org/ipAddrs`: A list of IPv4 and/or IPv6 addresses to assign to the Pod. The requested IP addresses will be assigned from {{prodname}} IPAM and must exist within a configured IP pool. - - Example: - - ```yaml - annotations: - 'cni.projectcalico.org/ipAddrs': '["192.168.0.1"]' - ``` - -- `cni.projectcalico.org/ipAddrsNoIpam`: A list of IPv4 and/or IPv6 addresses to assign to the Pod, bypassing IPAM. Any IP conflicts and routing have to be taken care of manually or by some other system. - {{prodname}} will only distribute routes to a Pod if its IP address falls within a {{prodname}} IP pool using BGP mode. Calico will not distribute ipAddrsNoIpam routes when operating in VXLAN mode. 
If the assigned IP address is not in a {{prodname}} IP pool, or falls within a pool that uses VXLAN encapsulation, you must ensure that routing to that IP address is handled through another mechanism.
-
-  Example:
-
-  ```yaml
-  annotations:
-    'cni.projectcalico.org/ipAddrsNoIpam': '["10.0.0.1"]'
-  ```
-
-  The ipAddrsNoIpam feature is disabled by default. It can be enabled in the feature_control section of the CNI network config:
-
-  ```json
-  {
-    "name": "any_name",
-    "cniVersion": "0.1.0",
-    "type": "calico",
-    "ipam": {
-      "type": "calico-ipam"
-    },
-    "feature_control": {
-      "ip_addrs_no_ipam": true
-    }
-  }
-  ```
-
-  :::caution
-
-  This feature allows for the bypassing of network policy via IP spoofing.
-  Users should make sure the proper admission control is in place to prevent users from selecting arbitrary IP addresses.
-
-  :::
-
-:::note
-
-- The `ipAddrs` and `ipAddrsNoIpam` annotations can't be used together.
-- You can only specify one IPv4/IPv6 or one IPv4 and one IPv6 address with these annotations.
-- When `ipAddrs` or `ipAddrsNoIpam` is used with `ipv4pools` or `ipv6pools`, `ipAddrs` / `ipAddrsNoIpam` take priority.
-
-:::
-
-#### Requesting a floating IP
-
-You can request a floating IP address for a pod through [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/) with {{prodname}}.
-
-:::note
-
-The specified address must belong to an IP Pool for advertisement to work properly.
-
-:::
-
-- `cni.projectcalico.org/floatingIPs`: A list of floating IPs which will be assigned to the pod's workload endpoint.
-
-  Example:
-
-  ```yaml
-  annotations:
-    'cni.projectcalico.org/floatingIPs': '["10.0.0.1"]'
-  ```
-
-  The floatingIPs feature is disabled by default. It can be enabled in the feature_control section of the CNI network config:
-
-  ```json
-  {
-    "name": "any_name",
-    "cniVersion": "0.1.0",
-    "type": "calico",
-    "ipam": {
-      "type": "calico-ipam"
-    },
-    "feature_control": {
-      "floating_ips": true
-    }
-  }
-  ```
-
-  :::caution
-
-  This feature can allow pods to receive traffic which may not have been intended for that pod.
-  Users should make sure the proper admission control is in place to prevent users from selecting arbitrary floating IP addresses.
-
-  :::
-
-### Using IP pools node selectors
-
-Nodes will only assign workload addresses from IP pools which select them. By
-default, IP pools select all nodes, but this can be configured using the
-`nodeSelector` field. Check out the [IP pool resource document](resources/ippool.mdx)
-for more details.
-
-Example:
-
-1. Create (or update) an IP pool that only allocates IPs for nodes that have the
-   label `rack=0`.
-
-   ```bash
-   calicoctl create -f -<
- -::: - -## {{nodecontainer}} - -| Path | Access | -| ------------------------------------------------------------- | ------ | -| /calico/felix/v1/\* | RW | -| /calico/felix/v2/\* | RW | -| /calico/ipam/v2/\* | RW | -| /calico/resources/v3/projectcalico.org/felixconfigurations/\* | RW | -| /calico/resources/v3/projectcalico.org/nodes/\* | RW | -| /calico/resources/v3/projectcalico.org/workloadendpoints/\* | RW | -| /calico/resources/v3/projectcalico.org/clusterinformations/\* | RW | -| /calico/resources/v3/projectcalico.org/ippools/\* | RW | -| /calico/resources/v3/projectcalico.org/\* | R | - -## Felix as a stand alone process - -| Path | Access | -| ----------------------------------------- | ------ | -| /calico/felix/v1/\* | RW | -| /calico/felix/v2/\* | RW | -| /calico/resources/v3/projectcalico.org/\* | R | - -## CNI-plugin - -| Path | Access | -| ------------------------------------------------------------- | ------ | -| /calico/ipam/v2/\* | RW | -| /calico/resources/v3/projectcalico.org/workloadendpoints/\* | RW | -| /calico/resources/v3/projectcalico.org/ippools/\* | R | -| /calico/resources/v3/projectcalico.org/clusterinformations/\* | R | -| /calico/resources/v3/projectcalico.org/nodes/\* | R | - -## calico/kube-controllers - -| Path | Access | -| ----------------------------------------------------------------------- | ------ | -| /calico/ipam/v2/\* | RW | -| /calico/resources/v3/projectcalico.org/profiles/\* | RW | -| /calico/resources/v3/projectcalico.org/networkpolicies/\* | RW | -| /calico/resources/v3/projectcalico.org/nodes/\* | RW | -| /calico/resources/v3/projectcalico.org/clusterinformations/\* | RW | -| /calico/resources/v3/projectcalico.org/hostendpoints/\* | RW | -| /calico/resources/v3/projectcalico.org/kubecontrollersconfigurations/\* | RW | -| /calico/resources/v3/projectcalico.org/\* | R | - -:::note - -By default, `calico/kube-controllers` performs periodic -compaction of the etcd data store. If you limit it to just these -paths it will be unauthorized to perform this compaction, as that -operation requires root privileges on the etcd cluster. You should -[configure auto-compaction](https://etcd.io/docs/v3.3.12/op-guide/maintenance/) -on your etcd cluster and -[disable `calico/kube-controllers` periodic compaction](../kube-controllers/configuration.mdx). 
- -::: - -## OpenStack Calico driver for Neutron - -| Path | Access | -| ----------------------------------------- | ------ | -| /calico/resources/v3/projectcalico.org/\* | RW | -| /calico/dhcp/v1/\* | RW | -| /calico/dhcp/v2/\* | RW | -| /calico/compaction/v1/\* | RW | -| /calico/openstack/v1/\* | RW | -| /calico/openstack/v2/\* | RW | -| /calico/felix/v1/\* | R | -| /calico/felix/v2/\* | R | - -## OpenStack Calico DHCP agent - -| Path | Access | -| ----------------------------------------- | ------ | -| /calico/resources/v3/projectcalico.org/\* | R | -| /calico/dhcp/v1/\* | R | -| /calico/dhcp/v2/\* | R | - -## calicoctl (read only access) - -| Path | Access | -| ----------------------------------------- | ------ | -| /calico/ipam/v2/\* | R | -| /calico/resources/v3/projectcalico.org/\* | R | - -## calicoctl (policy editor access) - -| Path | Access | -| --------------------------------------------------------------- | ------ | -| /calico/ipam/v2/\* | R | -| /calico/resources/v3/projectcalico.org/\* | R | -| /calico/resources/v3/projectcalico.org/globalnetworkpolicies/\* | RW | -| /calico/resources/v3/projectcalico.org/globalnetworksets/\* | RW | -| /calico/resources/v3/projectcalico.org/networkpolicies/\* | RW | -| /calico/resources/v3/projectcalico.org/networksets/\* | RW | -| /calico/resources/v3/projectcalico.org/profiles/\* | RW | - -## calicoctl (full read/write access) - -| Path | Access | -| ----------------------------------------- | ------ | -| /calico/ipam/v2/\* | RW | -| /calico/resources/v3/projectcalico.org/\* | RW | - - - - diff --git a/calico_versioned_docs/version-3.25/reference/etcd-rbac/certificate-generation.mdx b/calico_versioned_docs/version-3.25/reference/etcd-rbac/certificate-generation.mdx deleted file mode 100644 index 1693a6730e..0000000000 --- a/calico_versioned_docs/version-3.25/reference/etcd-rbac/certificate-generation.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -description: Generate Certificates of Authority (CA) to authenticate users with etcd datastore. ---- - -# Generating certificates - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - - - -This document does not apply to operator installations of Calico. - - - - -The etcd datastore has the concept of users that are linked to roles, where -each role has a defined set of access permissions to the data stored in etcd. -This tutorial walks you through the process of generating the Certificate -Authority (CA), Certificates and Keys that can be used to authenticate a -specific user with etcd. There are many different tools that can be used to -generate these files. This tutorial tries to layout the unique or specific -details that are needed for each of the different certificates but uses the -[hack/tls-setup tool from the etcd repo](https://github.com/coreos/etcd/tree/master/hack/tls-setup), -to make certificate generation easy. - -The etcd server links a certificate to a specific user by using the Common -Name (CN) attribute in the certificate. It is important to ensure that the CN -in the certificate for each component that will be accessing etcd match the -username in etcd that has the appropriate etcd roles for accessing its -required keys or paths. - -## Using the hack/tls-setup tool - -If you use hack/tls-setup too, ensure you have followed the README -and are able to run `make` successfully. - -The directories of interest in tls-setup are `config` and `certs`. 
-The directories of interest in tls-setup are `config` and `certs`.
-As the names indicate, `config` is where the configuration files used for CA
-and certificate generation live, and `certs` is where the generated files are
-written.
-
-Generating certificates with hack/tls-setup:
-
-1. Edit the [etcd certificate config](#configuration-for-the-etcd-certificates).
-2. Add the
-   [per-user/per-component configuration files](#configuration-for-per-userper-components-etcd-certificates).
-3. Run `make`. (Re-running `make` will regenerate the CA and all certificates.)
-
-Generating the certificates creates:
-
-- the CA
-- a certificate and key pair for 3 etcd servers
-- a certificate and key pair for etcd proxies
-- the certificate and key pairs for each user/component
-
-# Configuration for the Certificate Authority
-
-The default CA configuration included with hack/tls-setup works well with no
-additional configuration. The generated file `certs/ca.pem` will need to be
-provided to all components (etcd, Kubernetes apiserver, and all calico
-components).
-
-# Configuration for the etcd certificates
-
-Update the file `config/req-csr.json` by adding the IP addresses of the
-servers that will be running the etcd members to the `"hosts"`
-section. After generating the certs, three certs are created that can be
-used for three etcd member servers (though just using one works when testing).
-These certificate and key files are `certs/etcd[123].pem` and
-`certs/etcd[123]-key.pem`, and a matching pair will need to be provided to
-each etcd member.
-
-If using etcd proxies, the cert/key pair generated by the tool
-(the files `certs/proxy1.pem` and `certs/proxy1-key.pem`) can be used with all
-proxies, or you could create individual cert/key pairs for each proxy too.
-
-# Configuration for per-user/per-component's etcd certificates
-
-The certificates for {{prodname}}, Kubernetes, or any other component can be
-generated with configuration files similar to the one provided below.
-Replace the <etcd_username> placeholder with the username of the etcd user
-that has roles allowing access to the paths/prefix keys required by the
-associated component.
-
-```json
-{
-  "CN": "<etcd_username>",
-  "hosts": ["localhost"],
-  "key": {
-    "algo": "ecdsa",
-    "size": 384
-  },
-  "names": [
-    {
-      "O": "autogenerated",
-      "OU": "etcd cluster",
-      "L": "the internet"
-    }
-  ]
-}
-```
-
-The additional configuration files you create should be added to the `config`
-directory located in your hack/tls-setup folder. To build certificates for
-each new configuration, add lines similar to those below to
-the `req:` target in the Makefile. For each configuration added, make sure the
-configuration file name and cert/key file prefix are updated appropriately by
-substituting an appropriate name for <component>.
-
-```bash
-	$(CFSSL) gencert \
-	  -ca certs/ca.pem \
-	  -ca-key certs/ca-key.pem \
-	  -config config/ca-config.json \
-	  config/req-<component>.json | $(JSON) -bare certs/<component>
-```
-
-Once the certificate and key files are generated, they will need to be provided
-to the proper component, which is beyond the scope of this particular document.
-See [this guide](kubernetes.mdx) for how to provide certificates to Kubernetes and {{prodname}}
-components.
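-Before wiring a generated certificate into a component, it can be worth sanity-checking that its CN matches the
-intended etcd username (the file name here is illustrative):
-
-```bash
-# The subject line should show a CN equal to the etcd username for this component.
-openssl x509 -in certs/calico-node.pem -noout -subject
-```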
- 
- 
-
diff --git a/calico_versioned_docs/version-3.25/reference/etcd-rbac/index.mdx b/calico_versioned_docs/version-3.25/reference/etcd-rbac/index.mdx
deleted file mode 100644
index a779d8592a..0000000000
--- a/calico_versioned_docs/version-3.25/reference/etcd-rbac/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Tasks for protecting your etcd datastore.
-hide_table_of_contents: true
----
-
-# Configuring etcd RBAC
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-<DocCardList items={useCurrentSidebarCategory().items} />
diff --git a/calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes-advanced.mdx b/calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes-advanced.mdx
deleted file mode 100644
index 7adfec02b4..0000000000
--- a/calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes-advanced.mdx
+++ /dev/null
@@ -1,92 +0,0 @@
----
-description: Limit user access to Calico components or calicoctl.
----
-
-# Segmenting etcd on Kubernetes (advanced)
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
- 
- 
-
-This document does not apply to operator installations of Calico.
-
- 
- 
-
-This document describes advanced segmentation of the etcd roles to limit
-access of individual {{prodname}} components or to limit calicoctl user access.
-It assumes you have followed [this guide](kubernetes.mdx) for initial etcd
-RBAC configuration of {{prodname}} and Kubernetes.
-
-## Why you might be interested in this guide
-
-You want to limit access on a per-{{prodname}}-component level, or to create
-limited-access roles for calicoctl users of the etcd datastore.
-
-## Components that need etcd Roles
-
-The following components need certificates with a Common Name that matches an
-etcd user that has been given appropriate roles allowing access to the key
-prefixes or paths listed or linked below.
-
-- [cni-plugin](calico-etcdv3-paths.mdx#cni-plugin)
-- [{{prodname}} Kubernetes controllers](calico-etcdv3-paths.mdx#calicokube-controllers)
-- [{{nodecontainer}}](calico-etcdv3-paths.mdx#caliconode)
-- It may also be useful to create a certificate key pair for use with
-  calicoctl, even creating specific ones for
-  [read only access](calico-etcdv3-paths.mdx#calicoctl-read-only-access),
-  [policy editor access](calico-etcdv3-paths.mdx#calicoctl-policy-editor-access),
-  and [full read/write access](calico-etcdv3-paths.mdx#calicoctl-full-readwrite-access).
-
-All certificate/key pairs that are referenced below are assumed to have been
-created for the specific component with the information above.
-
-## {{prodname}} components
-
-Once the certificates are generated and the users and roles have been set up
-in etcd, the components using them must be configured. Here are the same
-components listed above and links to their detailed configuration pages:
-
-- [cni-plugin](../configure-cni-plugins.mdx)
-- [{{prodname}} Kubernetes controllers](../kube-controllers/configuration.mdx)
-- [{{nodecontainer}}](../configure-calico-node.mdx)
-- [calicoctl](../../operations/calicoctl/install.mdx)
-
-Below are examples and suggestions for a hosted {{prodname}} install where
-the {{prodname}} components are launched through a Kubernetes manifest file; this
-is not required, and the same configuration could be achieved by configuring
-services that run outside of Kubernetes.
-
-### Per component certificate setup
-
-A setup that needs a certificate for each component is possible while using a
-hosted manifest. This setup requires a certificate for each different {{prodname}}
-component type listed above (cni-plugin, {{prodname}} Kubernetes controllers, and
-`{{nodecontainer}}`).
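-For example, a per-component Secret might be created from the generated files like this (the Secret name,
-namespace, and file names are illustrative):
-
-```bash
-kubectl create secret generic calico-cni-etcd-certs \
-  --namespace kube-system \
-  --from-file=etcd-ca=certs/ca.pem \
-  --from-file=etcd-cert=certs/cni-plugin.pem \
-  --from-file=etcd-key=certs/cni-plugin-key.pem
-```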
-This setup needs updates to the manifest similar to those described in
-[Using etcd RBAC to segment Kubernetes and {{prodname}}: Updating a hosted {{prodname}} manifest](kubernetes.mdx#updating-a-hosted-Calico-manifest).
-In addition to those updates, a separate Secret for _each_ component
-must be created to hold the CA, certificate, and key data base64 encoded.
-Then the specific Secret for each component must be in the `volumes` list
-for the correct pod, and the `volumeMounts` for the appropriate container must
-reference the volume for the `/calico-secrets` mountPath.
-
-### Per node per component certificate setup
-
-While the above is a good step toward locking down access to etcd and would
-probably satisfy the needs of many, there is a third option that could
-utilize a different certificate for each component for each node. This type
-of setup can be achieved in multiple ways and will be left as an exercise for
-the implementor. Some possibilities for achieving this are:
-
-- Installing and starting the {{prodname}} components with a configuration management
-  tool which installs and configures the certificates.
-- Creating a manifest with a sidecar container that pulls the proper
-  certificate information from Vault or another secret-management tool.
-
- 
- 
-
diff --git a/calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes.mdx b/calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes.mdx
deleted file mode 100644
index 30578358de..0000000000
--- a/calico_versioned_docs/version-3.25/reference/etcd-rbac/kubernetes.mdx
+++ /dev/null
@@ -1,104 +0,0 @@
----
-description: Limit user access to Kubernetes and Calico components.
----
-
-# Segmenting etcd on Kubernetes (basic)
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
- 
- 
-
-This document does not apply to operator installations of Calico.
-
- 
- 
-
-When using etcd with RBAC, all components that access etcd must be configured
-with the proper certificates. This document describes the users and roles
-needed to segment etcd so that Kubernetes and {{prodname}} can only read and write
-within their respective subtrees/prefixes. To configure more compartmentalized
-access for the {{prodname}} components, see the follow-on
-[guide](kubernetes-advanced.mdx).
-
-This guide assumes you are following the general
-[Generating certificates](index.mdx) guidance
-for setting up certificates and the etcd cluster, users, and roles.
-
-## Why you might be interested in this guide
-
-You are using Kubernetes and {{prodname}} that share an etcd datastore and you wish
-to ensure that {{prodname}} and Kubernetes are unable to access each other's etcd
-data.
-
-## Needed etcd Roles
-
-The following components need certificates with a Common Name that matches an
-etcd user that has been given appropriate roles allowing access to the key
-prefixes or paths listed below.
-
-- kube-apiserver
-  - Read and write access to `/registry/`.
-  - The etcd user needs to be given the root role to perform compaction when
-    using the etcd v3 API (this also means that Kubernetes will have
-    full read and write access to v3 data).
-- {{prodname}}
-  - Read and write access to `/calico/`.
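-As a sketch, the {{prodname}} role and user might be created with the v3 API like this (the names are
-illustrative; see the guides referenced above for the full procedure):
-
-```bash
-# Role that can read and write only under the /calico/ prefix.
-etcdctl role add calico-rw
-etcdctl role grant-permission calico-rw --prefix=true readwrite /calico/
-# User whose name must match the CN in the Calico client certificate.
-etcdctl user add calico
-etcdctl user grant-role calico calico-rw
-```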
-All certificate/key pairs that are referenced below are assumed to have been
-created for the specific component with the information above.
-
-## Kubernetes API server
-
-The kube-apiserver is the only Kubernetes component that directly accesses etcd.
-The flags required to provide the kube-apiserver with certificates for
-accessing an etcd cluster are:
-
-- `--etcd-cafile=<CA certificate>`
-- `--etcd-certfile=<client certificate>`
-- `--etcd-keyfile=<client key>`
-
-Setting these will depend on the method used to deploy Kubernetes, so refer
-to your integrator's documentation for help setting these flags.
-
-## Updating a hosted {{prodname}} manifest
-
-To deploy {{prodname}} with the CA and {{prodname}}-specific certificate/key pair,
-use [this manifest template]({{manifestsUrl}}/manifests/calico-etcd.yaml)
-with the modifications described below. The same information could be added to
-or updated in other manifests, but the linked one is the most straightforward
-example.
-
-The pieces that would need updating are:
-
-- The `calico-config` ConfigMap lines with `etcd_ca`, `etcd_cert`, and
-  `etcd_key` should be updated as follows
-
-  ```yaml
-  etcd_ca: '/calico-secrets/etcd-ca'
-  etcd_cert: '/calico-secrets/etcd-cert'
-  etcd_key: '/calico-secrets/etcd-key'
-  ```
-
-- The Secret named `calico-etcd-secrets` needs to be updated with the CA and
-  cert/key. The information stored in `data` in a Secret needs to be base64
-  encoded. The files can be converted to base64 encoding by running a command
-  like `cat <file> | base64 -w 0` on each file and then inserting the output
-  into the appropriate field.
-
-  - The `etcd-key` field needs the base64 encoded file contents from the
-    key file.
-  - The `etcd-cert` field needs the base64 encoded file contents from the
-    certificate file.
-  - The `etcd-ca` field needs the base64 encoded file contents from the
-    Certificate Authority certificate.
-
-- If sharing an etcd cluster with Kubernetes, disable etcd compaction in the
-  calico-kube-controllers deployment by setting the `COMPACTION_PERIOD` environment variable to 0.
-
-Once the updates above are made, the manifest can be applied in the normal manner.
-
- 
- 
-
diff --git a/calico_versioned_docs/version-3.25/reference/etcd-rbac/overview.mdx b/calico_versioned_docs/version-3.25/reference/etcd-rbac/overview.mdx
deleted file mode 100644
index 98825010cc..0000000000
--- a/calico_versioned_docs/version-3.25/reference/etcd-rbac/overview.mdx
+++ /dev/null
@@ -1,70 +0,0 @@
----
-description: Protect your etcd datastore by restricting operation permissions.
----
-
-# Setting up etcd certificates for RBAC
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
- 
- 
-
-This document does not apply to operator installations of Calico.
-
- 
- 
-
-When using etcd it is a good idea to protect the data stored there. This is
-even more true when you have multiple components using a common etcd cluster.
-This set of tutorials guides you through the process of locking down and
-segmenting access to the data in your etcd datastore.
-
-## Why you might be interested in this guide
-
-- You want to restrict the operations that support staff are able to perform
-  to minimize accidental corruption or deletion of etcd data.
-- You want to use a single etcd cluster for {{prodname}} and Kubernetes (rather than
-  having an etcd cluster for {{prodname}} and a separate etcd cluster for Kubernetes).
-- You want to restrict the read/write access of the various {{prodname}} components
-  as an additional safety measure.
-
-## Configuration Concept
-
-The central piece that will link the components together is the Certificate
-Authority (CA). It will be used to
-generate certificates and keys that the etcd members (and proxies) and components
-(like {{prodname}} and Kubernetes) will need to authenticate with the etcd cluster.
-Because all the certificates will be generated from the same CA and all the
-components will have the CA certificate, those connections can be mutually
-authenticated.
-
-The certificates and keys generated for the components accessing etcd will allow
-access to particular key prefixes or paths in etcd. The certificates are linked
-with an etcd username by being generated with a Common Name (CN) that matches
-the etcd username. The etcd username is linked with a role or multiple roles
-that allow access to the appropriate data in etcd.
-
-## Requirements
-
-- If using only the v2 API with etcd, as {{prodname}} does, then the minimum etcd
-  version required is 3.0.12.
-- If using the v3 API with etcd, as Kubernetes can, then the minimum etcd
-  version required is 3.2. (Note: The 3.x version of etcd supports both the v2
-  and the v3 API.)
-
-## Setup
-
-1. [Generate CA, certificates, and keys](certificate-generation.mdx).
-2. Set up etcd with the CA certificate and the certificates generated in step 1.
-   See the
-   [etcd security op-guide](https://coreos.com/etcd/docs/latest/op-guide/security.html)
-   for help configuring etcd.
-3. [Create Users and Roles in etcd](users-and-roles.mdx).
-4. Configure components. For example:
-   - [Setting up Kubernetes with {{prodname}} utilizing etcd RBAC](kubernetes.mdx).
-   - [Advanced Kubernetes set ups utilizing etcd RBAC](kubernetes-advanced.mdx).
-
- 
- 
-
diff --git a/calico_versioned_docs/version-3.25/reference/etcd-rbac/users-and-roles.mdx b/calico_versioned_docs/version-3.25/reference/etcd-rbac/users-and-roles.mdx
deleted file mode 100644
index 601a447395..0000000000
--- a/calico_versioned_docs/version-3.25/reference/etcd-rbac/users-and-roles.mdx
+++ /dev/null
@@ -1,59 +0,0 @@
----
-description: Provide role-based access control to etcd datastore.
----
-
-# Creating users and roles
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
- 
- 
-
-This document does not apply to operator installations of Calico.
-
- 
- 
-
-Providing role-based access control within etcd requires the following:
-
-- Creation of etcd roles which provide appropriate access to the specific set
-  of etcd keys required by the role
-- Creation of etcd users who are assigned roles
-
-:::note
-
-The etcd release 3.x+ supports both v2 and v3 of its API. The etcd server
-keeps the roles and users separate; this means that if a user/role is created
-with the v2 API it will not appear in the v3 API. When adding roles and users,
-they must be added through the API version that matches the version the
-component will be using. This concern can be ignored if all roles and users
-are added through both API versions.
-
-:::
-
-## Users and Roles creation guides
-
-Use the following guides to set up your users, roles, and assignment of roles
-to users. Since this document assumes that you have configured your etcd cluster
-to use certificates, you must pass a proper CA and cert/key pair to the
-commands used in the guides below.
-
-- [etcd v2 guide](https://etcd.io/docs/v2/authentication/)
-- [etcd v3 guide](https://etcd.io/docs/latest/op-guide/security/)
-
-## Suggestions for your roles and users
-
-- Create a root user under both the v2 and v3 etcd API to ensure access after
-  you enable authentication on your cluster.
-- Create a guest role (particularly on the v2 API) and ensure it does not have
-  access to your cluster.
-- Enable authentication on both the v2 and the v3 API; enabling it on one does
-  not enable it on the other. Make sure you enable authentication only after
-  you have created your root users.
-- Ensure your usernames match the Common Name set in your certificates to allow
-  access without specifying the username to your components.
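-A sketch of the suggested ordering with etcdctl (the v2 and v3 APIs each maintain their own users):
-
-```bash
-# v3 API: create root, then enable auth.
-ETCDCTL_API=3 etcdctl user add root
-ETCDCTL_API=3 etcdctl auth enable
-
-# v2 API: a separate auth domain, enabled independently.
-ETCDCTL_API=2 etcdctl user add root
-ETCDCTL_API=2 etcdctl auth enable
-```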
-
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/faq.mdx b/calico_versioned_docs/version-3.25/reference/faq.mdx
deleted file mode 100644
index 6bb1642b7b..0000000000
--- a/calico_versioned_docs/version-3.25/reference/faq.mdx
+++ /dev/null
@@ -1,501 +0,0 @@
----
-description: Common questions that users ask about Calico.
----
-
-# Frequently asked questions
-
-## Why use {{prodname}}?
-
-The problem {{prodname}} tries to solve is the networking of workloads (VMs,
-containers, etc.) in a high-scale environment. Existing L2-based methods
-for solving this problem run into trouble at high scale. Compared to these,
-we think {{prodname}} is more scalable, simpler, and more flexible. We think
-you should look into it if you have more than a handful of nodes on a
-single site.
-
-{{prodname}} also provides a rich network security model that
-allows operators and developers to declare intent-based network security
-policy that is automatically rendered into distributed firewall rules
-across a cluster of containers, VMs, and/or servers.
-
-For a more detailed discussion of this topic, see our blog post at
-[Why Calico?](https://www.projectcalico.org/why-calico/).
-
-## Does {{prodname}} work with IPv6?
-
-Yes! {{prodname}}'s core components support IPv6 out of the box. However,
-not all orchestrators that we integrate with support IPv6 yet.
-
-## Why does my container have a route to 169.254.1.1?
-
-In a {{prodname}} network, each host acts as a gateway router for the
-workloads that it hosts. In container deployments, {{prodname}} uses
-169.254.1.1 as the address for the {{prodname}} router. By using a
-link-local address, {{prodname}} saves precious IP addresses and avoids
-burdening the user with configuring a suitable address.
-
-While the routing table may look a little odd to someone who is used to
-configuring LAN networking, using explicit routes rather than
-subnet-local gateways is fairly common in WAN networking.
-
-## Why isn't {{prodname}} working with a containerized Kubelet?
-
-The {{prodname}} hosted install places the necessary CNI binaries and config on each
-Kubernetes node in a directory on the host as specified in the manifest. By
-default it places binaries in `/opt/cni/bin` and config in `/etc/cni/net.d`.
-
-When running the kubelet as a container using hyperkube,
-you need to make sure that the containerized kubelet can see the CNI network
-plugins and config that have been installed by mounting them into the kubelet container.
-
-For example, add the following arguments to the kubelet-wrapper service:
-
-```
---volume /etc/cni/net.d:/etc/cni/net.d \
---volume /opt/cni/bin:/opt/cni/bin \
-```
-
-Without the above volume mounts, the kubelet will not call the {{prodname}} CNI binaries, and so
-{{prodname}} [workload endpoints](resources/workloadendpoint.mdx) will
-not be created, and {{prodname}} policy will not be enforced.
-
-## How do I view {{prodname}} CNI logs?
-
-The {{prodname}} CNI plugin emits logs to stderr, which are then logged out by the kubelet. Where these logs end up
-depends on how your kubelet is configured. For deployments using `systemd`, you can view them via `journalctl`.
-
-The log level can be configured via the CNI network configuration file by changing the value of the
-key `log_level`, as in the sketch below.
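-
-A minimal sketch, assuming a manifest-based install where the rendered CNI
-config lives at the default path `/etc/cni/net.d/10-calico.conflist` and the
-current level is the default `info` (verify both on your own nodes first):
-
-```bash
-# Raise the Calico CNI plugin's log level on one node.
-sed -i 's/"log_level": "info"/"log_level": "debug"/' /etc/cni/net.d/10-calico.conflist
-
-# On a systemd host, the kubelet logs the plugin's stderr output.
-journalctl -u kubelet -f | grep -i calico
-```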
-See [Configuring the {{prodname}} CNI plugins](configure-cni-plugins.mdx) for more information.
-
-CNI plugin logs can also be found in `/var/log/calico/cni`.
-
-## How do I configure the pod IP range?
-
-When using {{prodname}} IPAM, IP addresses are assigned from [IP Pools](resources/ippool.mdx).
-
-By default, all enabled IP pools are used. However, you can specify which IP pools to use for IP address management in the [CNI network config](configure-cni-plugins.mdx#ipam),
-or on a per-pod basis using [Kubernetes annotations](configure-cni-plugins.mdx#using-kubernetes-annotations).
-
-## How do I assign a specific IP address to a pod?
-
-For most use cases, it's not necessary to assign specific IP addresses to a Kubernetes pod, and it's recommended to use Kubernetes services instead.
-However, if you do need to assign a particular address to a pod, {{prodname}} provides two ways of doing this:
-
-- You can request an IP that is available in {{prodname}} IPAM using the `cni.projectcalico.org/ipAddrs` annotation.
-- You can request an IP using the `cni.projectcalico.org/ipAddrsNoIpam` annotation. Note that this annotation bypasses the configured IPAM plugin, so in most cases the `ipAddrs` annotation above is the recommended one.
-
-See the [Requesting a specific IP address](configure-cni-plugins.mdx#requesting-a-specific-ip-address) section in the CNI plugin reference documentation for more details.
-
-## Why can't I see the 169.254.1.1 address mentioned above on my host?
-
-{{prodname}} tries hard to avoid interfering with any other configuration
-on the host. Rather than adding the gateway address to the host side
-of each workload interface, {{prodname}} sets the `proxy_arp` flag on the
-interface. This makes the host behave like a gateway, responding to
-ARPs for 169.254.1.1 without having to actually allocate the IP address
-to the interface.
-
-## Why do all cali\* interfaces have the MAC address ee:ee:ee:ee:ee:ee?
-
-In some setups the kernel is unable to generate a persistent MAC address and so
-{{prodname}} assigns a MAC address itself. Since {{prodname}} uses
-point-to-point routed interfaces, traffic does not reach the data link layer,
-so the MAC address is never used and can therefore be the same for all the
-cali\* interfaces.
-
-## Can I prevent my Kubernetes pods from initiating outgoing connections?
-
-Yes! The Kubernetes [`NetworkPolicy`](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
-API added support for egress policies in v1.8. You can also use `calicoctl`
-to configure egress policy to prevent Kubernetes pods from initiating outgoing
-connections based on the full set of supported {{prodname}} policy primitives
-including labels, Kubernetes namespaces, CIDRs, and ports.
-
-## I've heard {{prodname}} uses proxy ARP; doesn't proxy ARP cause a lot of problems?
-
-It can, but not in the way that {{prodname}} uses it.
-
-In container deployments, {{prodname}} only uses proxy ARP for resolving the
-169.254.1.1 address. The routing table inside the container ensures
-that all traffic goes via the 169.254.1.1 gateway, so that is the only
-IP that will be ARPed by the container.
-
-## Is {{prodname}} compliant with PCI/DSS requirements?
-
-PCI certification applies to the whole end-to-end system, of which
-{{prodname}} would be a part.
-We understand that most current solutions use
-VLANs, but after studying the PCI requirements documents, we believe
-that {{prodname}} does meet those requirements and that nothing in the
-documents _mandates_ the use of VLANs.
-
-## How do I enable IP-in-IP and NAT outgoing on an IP pool?
-
-1. Retrieve the current IP pool config.
-
-   ```bash
-   calicoctl get ipPool --export -o yaml > pool.yaml
-   ```
-
-2. Modify the IP pool config.
-
-   Modify the pool's spec to enable IP-in-IP and NAT outgoing. (See
-   [IP pools](resources/ippool.mdx)
-   for other settings that can be edited.)
-
-   ```yaml
-   apiVersion: projectcalico.org/v3
-   kind: IPPool
-   metadata:
-     name: ippool-1
-   spec:
-     cidr: 192.168.0.0/16
-     ipipMode: Always
-     natOutgoing: true
-   ```
-
-3. Load the modified file.
-
-   ```bash
-   calicoctl replace -f pool.yaml
-   ```
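-
-If you'd rather not round-trip through a file, the same change can be sketched
-with `calicoctl patch` (assuming the pool is named `ippool-1`, as in the
-example above):
-
-```bash
-calicoctl patch ippool ippool-1 --patch '{"spec": {"ipipMode": "Always", "natOutgoing": true}}'
-```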
-
-## How does {{prodname}} maintain saved state?
-
-State is saved in a few places in a {{prodname}} deployment, depending on
-whether it's global or local state.
-
-Local state is state that belongs on a single compute host, associated
-with a single running Felix instance (things like kernel routes, tap
-devices, etc.). Local state is entirely stored by the Linux kernel on the
-host, with Felix storing it only as a temporary mirror. This makes Felix
-effectively stateless, with the kernel acting as a backing data store on
-one side and etcd as a data source on the other.
-
-If Felix is restarted, it learns current local state by interrogating
-the kernel at startup. It then reads from `etcd` all the local state
-which it should have, and updates the kernel to match. This approach has
-strong resiliency benefits, in that if Felix restarts you don't suddenly
-lose access to your VMs or containers. As long as the Linux kernel is
-running, you've still got full functionality.
-
-The bulk of global state is mastered in whatever component hosts the
-plugin.
-
-- In the case of OpenStack, this means a Neutron database. Our
-  OpenStack plugin (more strictly a Neutron ML2 driver) queries the
-  Neutron database to find out state about the entire deployment. That
-  state is then reflected to `etcd` and so to Felix.
-- In certain cases, `etcd` itself contains the master copy of
-  the data. This is because some Docker deployments have an `etcd`
-  cluster that has the required resiliency characteristics, used to
-  store all system configuration and so `etcd` is configured so as to
-  be a suitable store for critical data.
-- In other orchestration systems, it may be stored in distributed
-  databases, either owned directly by the plugin or by the
-  orchestrator itself.
-
-The only other state storage in a {{prodname}} network is in the BGP sessions,
-which approximate a distributed database of routes. This BGP state is
-simply a replicated copy of the per-host routes configured by Felix
-based on the global state provided by the orchestrator.
-
-This makes the {{prodname}} design very simple, because we store very little
-state. All of our components can be shut down and restarted without risk,
-because they resynchronize state as necessary. This makes modeling
-their behavior extremely simple, reducing the scope for bugs.
-
-## I heard {{prodname}} is suggesting layer 2: I thought you were layer 3! What's happening?
-
-It's important to distinguish what {{prodname}} provides to the workloads
-hosted in a data center (a purely layer 3 network) from what the {{prodname}}
-project _recommends_ operators use to build their underlying network
-fabric.
-
-{{prodname}}'s core principle is that _applications_ and _workloads_
-overwhelmingly need only IP connectivity to communicate. For this reason,
-we build an IP-forwarded network to connect the tenant applications and
-workloads to each other and the broader world.
-
-However, the underlying physical fabric obviously needs to be set up
-too. Here, {{prodname}} has discussed how either a layer 2 (see
-[here](architecture/design/l2-interconnect-fabric.mdx))
-or a layer 3 (see
-[here](architecture/design/l3-interconnect-fabric.mdx))
-fabric
-could be integrated with {{prodname}}. This is one of the great strengths of
-the {{prodname}} model: it allows the infrastructure to be decoupled from what
-we show to the tenant applications and workloads.
-
-We have some thoughts on different interconnect approaches (as noted
-above), but just because we say that there are layer 2 and layer 3 ways
-of building the fabric, and that those decisions may have an impact on
-route scale, does not mean that {{prodname}} is "going back to Ethernet" or
-that we're recommending layer 2 for tenant applications. In all cases we
-forward on IP packets, no matter what architecture is used to build the
-fabric.
-
-## How do I control policy/connectivity without virtual/physical firewalls?
-
-{{prodname}} provides an extremely rich security policy model, applying policy at the first and last hop
-of the routed traffic within the {{prodname}} network (the source and
-destination compute hosts).
-
-This model is substantially more robust to failure than a centralized
-firewall-based model. In particular, the {{prodname}} approach has no
-single point of failure: if the device enforcing the firewall has failed
-then so has one of the workloads involved in the traffic (because the
-firewall is enforced by the compute host).
-
-This model is also extremely amenable to scaling out. Because we have a
-central repository of policy configuration, but apply it at the edges of
-the network (the hosts) where it is needed, we automatically ensure that
-the rules match the topology of the data center. This allows easy
-scaling out, and gives us all the advantages of a single firewall (one
-place to manage the rules), but none of the disadvantages (single points
-of failure, state sharing, hairpinning of traffic, etc.).
-
-Lastly, we decouple the reachability of nodes and the policy applied to
-them. We use BGP to distribute the topology of the network, telling
-every node how to get to every endpoint in case two endpoints need to
-communicate. We use policy to decide _if_ those two nodes should
-communicate, and if so, how. If policy changes and two endpoints should
-now communicate, where before they shouldn’t have, all we have to do is
-update policy: the reachability information does not change. If later
-they should be denied the ability to communicate, the policy is updated
-again, and again the reachability doesn’t have to change.
-
-## How does {{prodname}} interact with the Neutron API?
-
-[{{prodname}}'s interpretation of Neutron API calls](../networking/openstack/neutron-api.mdx)
-goes into extensive detail about how various Neutron API calls translate into
-{{prodname}} actions.
-
-## Why isn't the `-p` flag on `docker run` working as expected?
-
-The `-p` flag tells Docker to set up port mapping to connect a port on the
-Docker host to a port on your container via the `docker0` bridge.
-
-If a host's containers are connected to the `docker0` bridge interface, {{prodname}}
-would be unable to enforce security rules between workloads on the same host;
-all containers on the bridge would be able to communicate with one another.
-
-## Can {{prodname}} containers use any IP address within a pool, even subnet network/broadcast addresses?
-
-Yes! {{prodname}} is fully routed, so all IP addresses within a {{prodname}} pool are usable as
-private IP addresses to assign to a workload. This means addresses commonly
-reserved in an L2 subnet, such as IPv4 addresses ending in .0 or .255, are perfectly
-okay to use.
-
-## How do I get network traffic into and out of my {{prodname}} cluster?
-
-The recommended way to get traffic to/from your {{prodname}} network is by peering with
-your existing data center L3 routers using BGP and by assigning globally
-routable IPs (public IPs) to containers that need to be accessed from the internet.
-This allows incoming traffic to be routed directly to your containers without the
-need for NAT. This flat L3 approach delivers exceptional network scalability
-and performance.
-
-A common scenario is for your container hosts to be on their own
-isolated layer 2 network, like a rack in your server room or an entire data
-center. Access to that network is via a router, which is also the default
-router for all the container hosts.
-
-If this describes your infrastructure,
-[Configure outgoing NAT](../networking/configuring/workloads-outside-cluster.mdx) explains in more detail
-what to do. Otherwise, if you have a layer 3 (IP) fabric, then there are
-detailed datacenter networking recommendations given
-in [{{prodname}} over IP fabrics](architecture/design/l3-interconnect-fabric.mdx).
-We'd also encourage you to [get in touch](https://www.projectcalico.org/contact)
-to discuss your environment.
-
-### How can I enable NAT for outgoing traffic from containers with private IP addresses?
-
-If you want to allow containers with private IP addresses to be able to access the
-internet, then you can use your data center's existing outbound NAT capabilities
-(typically provided by the data center's border routers).
-
-Alternatively, you can use {{prodname}}'s built-in outbound NAT capability by enabling it on any
-{{prodname}} IP pool. In this case {{prodname}} will perform outbound NAT locally on the compute
-node on which each container is hosted.
-
-```bash
-cat <<EOF | calicoctl apply -f -
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: <name>
-spec:
-  cidr: <CIDR>
-  natOutgoing: true
-EOF
-```
-
-Where `<name>` is a name for the IP pool and `<CIDR>` is the CIDR of your IP pool, for example `192.168.0.0/16`.
-
-Remember: the security profile for the container will need to allow traffic to the
-internet as well. Refer to the appropriate guide for your orchestration
-system for details on how to configure policy.
-
-### How can I enable NAT for incoming traffic to containers with private IP addresses?
-
-As discussed, the recommended way to get traffic to containers that
-need to be accessed from the internet is to give them public IP addresses and
-to configure {{prodname}} to peer with the data center's existing L3 routers.
-
-In cases where this is not possible, you can configure incoming NAT
-(also known as DNAT) on your data center's existing border routers. Alternatively,
-you can configure incoming NAT with port mapping on the host on which the container
-is running.
-
-1. Create a new chain called `expose-ports` to hold the NAT rules.
-
-   ```bash
-   iptables -t nat -N expose-ports
-   ```
-
-1. Jump to that chain from the `OUTPUT` and `PREROUTING` chains.
-
-   ```bash
-   iptables -t nat -A OUTPUT -j expose-ports
-   iptables -t nat -A PREROUTING -j expose-ports
-   ```
-
-   :::tip
-
-   The `OUTPUT` chain is hit by traffic originating on the host itself;
-   the `PREROUTING` chain is hit by traffic coming from elsewhere.
-
-   :::
-
-1. For each port you want to expose, add a rule to the
-   expose-ports chain, replacing `<host-ip>` with the host IP that you
-   want to use to expose the port and `<host-port>` with the host port.
-
-   ```bash
-   iptables -t nat -A expose-ports -p tcp --destination <host-ip> \
-     --dport <host-port> -j DNAT --to <container-ip>:<container-port>
-   ```
-
-For example, you have a container to which you've assigned the `CALICO_IP`
-of 192.168.7.4, and you have NGINX running on port 8080 inside the container.
-If you want to expose this service on port 80 and your host has IP 192.0.2.1,
-then you could run the following commands:
-
-```bash
-iptables -t nat -N expose-ports
-iptables -t nat -A OUTPUT -j expose-ports
-iptables -t nat -A PREROUTING -j expose-ports
-
-iptables -t nat -A expose-ports -p tcp --destination 192.0.2.1 --dport 80 -j DNAT --to 192.168.7.4:8080
-```
-
-The commands will need to be run each time the host is restarted.
-
-Remember: the security profile for the container will need to allow traffic to the exposed port as well.
-Refer to the appropriate guide for your orchestration system for details on how to configure policy.
-
-### Can I run {{prodname}} in a public cloud environment?
-
-Yes. If you are running in a public cloud that doesn't allow either L3 peering or L2 connectivity between {{prodname}} hosts, then you can enable IP-in-IP in your {{prodname}} IP pool:
-
-```bash
-cat <<EOF | calicoctl apply -f -
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: <name>
-spec:
-  cidr: <CIDR>
-  ipipMode: Always
-  natOutgoing: true
-EOF
-```
-
-{{prodname}} will then route traffic between {{prodname}} hosts using IP-in-IP.
-
-For best performance in AWS, you can disable [Source/Destination Check](resources/felixconfig.mdx#spec) instead of using IP-in-IP or VXLAN, but only if all your instances are in the same subnet of your VPC. The setting must be `Disable` for the EC2 instance(s) to process traffic not matching the host interface IP address. Disabling the check is also applicable if your cluster is spread across multiple subnets; in that case, set `ipipMode` (or `vxlanMode`) to `CrossSubnet` so that only traffic crossing subnet boundaries is encapsulated, reducing the encapsulation overhead. Check [configuring overlay networking](../networking/configuring/vxlan-ipip.mdx) for the details.
-
-You can disable Source/Destination Check using [Felix configuration](resources/felixconfig.mdx), the AWS CLI, or the EC2 console. For example, using the AWS CLI:
-
-```bash
-aws ec2 modify-instance-attribute --instance-id <instance-id> --source-dest-check "{\"Value\": false}"
-
-cat <<EOF | calicoctl apply -f -
-apiVersion: projectcalico.org/v3
-kind: IPPool
-metadata:
-  name: <name>
-spec:
-  cidr: <CIDR>
-  ipipMode: CrossSubnet
-  natOutgoing: true
-EOF
-```
-
-### On AWS with IP-in-IP, why do I see no connectivity between workloads or only see connectivity if I ping in both directions?
-
-By default, AWS security groups block incoming IP-in-IP traffic.
-
-However, if an instance has recently sent some IP-in-IP traffic out when it receives some incoming IP-in-IP traffic,
-then AWS sees that as a response to an outgoing connection and it allows the incoming traffic. This leads to some very
-confusing behavior where traffic can be blocked and then suddenly start working!
-
-To resolve the issue, add a rule to your security groups that allows inbound and outbound IP-in-IP traffic (IP protocol
-number 4) between your hosts.
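-
-For example, a sketch using the AWS CLI (the security group ID is a
-placeholder; IP protocol `4` is IP-in-IP):
-
-```bash
-aws ec2 authorize-security-group-ingress --group-id <sg-id> \
-  --ip-permissions 'IpProtocol=4,UserIdGroupPairs=[{GroupId=<sg-id>}]'
-aws ec2 authorize-security-group-egress --group-id <sg-id> \
-  --ip-permissions 'IpProtocol=4,UserIdGroupPairs=[{GroupId=<sg-id>}]'
-```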
-
-## In Calico for OpenStack, why can't a VM ping its default gateway?
-
-The concept of a default gateway makes sense with OpenStack networking drivers that simulate
-direct layer 2 (Ethernet) connectivity between VMs in the same Neutron network. With that
-kind of simulation,
-
-- When a VM sends to another VM in the same network, there is no routing at all, from the
-  VM's point of view. (Of course there may be routing in the underlay network, because
-  compute hosts may be on different subnets.)
-
-- When a VM sends to something outside its own network, it goes, by simulated layer 2,
-  to the default gateway first, and then is routed to wherever it is addressed.
-
-However, OpenStack also allows drivers, including Calico, that use routing between the VMs
-of a Neutron network. With Calico specifically, any packet sent by a VM is
-layer-2-terminated and IP-routed by the VM's compute host, whether the VM is sending to
-another VM in the same network, or to anywhere else. So Calico doesn't need the "default
-gateway" concept, and it doesn't really make any sense with Calico. If a VM assumes that
-its default gateway is the first hop at which the packets it sends can be IP-routed, and
-relies on that in any way, it will be wrong with Calico networking.
-
-Now, with all that said, for detailed technical reasons to do with the DHCP server
-(dnsmasq), Calico does actually configure the default gateway IP (i.e., bind it to a Linux
-network interface) on every compute host with at least one VM in the relevant Neutron
-network; and that is one of the ingredients needed, in Linux, for a VM to be able to ping
-that IP.
-
-The reason why it still _isn't_ possible for a VM to ping that IP is that Calico by
-default configures iptables rules to block almost all communication _to_ its own host,
-because in general, of course, a workload should not be able to access and possibly
-compromise its host. There are a few pinholes here, e.g. for DHCP, but those do not
-include ping (ICMP Echo). If you start running a command like `watch 'sudo iptables-save
-c | grep DROP'` on a compute host, and then try pinging the default gateway IP from a VM
-on that host, you will see the DROP count increasing as each ping packet is sent and
-blocked.
-
-This behavior is controlled by a config parameter named `DefaultEndpointToHostAction`,
-whose default is `DROP`. For the sake of demonstration, you can change this by adding
-`DefaultEndpointToHostAction = RETURN` to `/etc/calico/felix.cfg`, then use `sudo systemctl restart calico-felix` to restart Felix, and then you will observe that a VM on
-that host _can_ ping its default gateway. However, we do not recommend routinely operating
-with `DefaultEndpointToHostAction = RETURN`, because that potentially allows a malicious
-VM to compromise its host.
-
-In summary, then, there are two points behind why a VM cannot normally ping its default
-gateway with Calico.
-
-1. The default gateway concept just doesn't really fit, and isn't needed, given how
-   Calico routes everything at the compute node, which is a fundamental aspect of Calico
-   networking for OpenStack.
-
-1. Calico's iptables rules generally do not allow a VM to contact its host.
-
-## Can Calico do IP multicast?
-
-Calico is a routed L3 network where each pod gets a /32. There's no broadcast domain for pods.
-That means that multicast doesn't just work as a side effect of broadcast. To get multicast to
-work, the host needs to act as a multicast gateway of some kind. Calico's architecture was designed
-to extend to cover that case, but it's not part of the product as yet.
diff --git a/calico_versioned_docs/version-3.25/reference/felix/configuration.mdx b/calico_versioned_docs/version-3.25/reference/felix/configuration.mdx
deleted file mode 100644
index 69f2cf2ce1..0000000000
--- a/calico_versioned_docs/version-3.25/reference/felix/configuration.mdx
+++ /dev/null
@@ -1,315 +0,0 @@
----
-description: Configure Felix, the daemon that runs on every machine that provides endpoints.
----
-
-# Configuring Felix
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-
-
-If you have installed Calico using the operator, you cannot modify the environment provided to Felix directly. To configure Felix, see the [FelixConfiguration](../resources/felixconfig.mdx) resource instead.
-
-
-
-:::note
-
-The following tables detail the configuration file and
-environment variable parameters. For `FelixConfiguration` resource settings,
-refer to [Felix Configuration Resource](../resources/felixconfig.mdx).
-
-:::
-
-Configuration for Felix is read from one of four possible locations, in order, as follows.
-
-1. Environment variables.
-2. The Felix configuration file.
-3. Host-specific `FelixConfiguration` resources (`node.<nodename>`).
-4. The global `FelixConfiguration` resource (`default`).
-
-The value of any configuration parameter is the value read from the
-_first_ location containing a value. For example, if an environment variable
-contains a value, it takes top precedence.
-
-If not set in any of these locations, most configuration parameters have
-defaults, and it should be rare to have to explicitly set them.
-
-The full list of parameters which can be set is as follows.
-
-### General configuration
-
-| Configuration file parameter | Environment variable | Description | Schema |
-| --- | --- | --- | --- |
-| `DataplaneWatchdogTimeout` | `FELIX_DATAPLANEWATCHDOGTIMEOUT` | Deprecated: superseded by `HealthTimeoutOverrides`. Timeout before the main dataplane goroutine is determined to have hung and Felix will report non-live and non-ready. Can be increased if the liveness check incorrectly fails (for example if Felix is running slowly on a heavily loaded system). [Default: `90`] | int |
-| `AwsSrcDstCheck` | `FELIX_AWSSRCDSTCHECK` | Set the [source-destination-check](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) when using AWS EC2 instances. Check [IAM role and profile configuration](../resources/felixconfig.mdx#aws-iam-rolepolicy-for-source-destination-check-configuration) for setting the necessary permission for this setting to work. [Default: `DoNothing`] | `DoNothing`, `Disable`, `Enable` |
-| `DatastoreType` | `FELIX_DATASTORETYPE` | The datastore that Felix should read endpoints and policy information from. [Default: `etcdv3`] | `etcdv3`, `kubernetes` |
-| `DeviceRouteSourceAddress` | `FELIX_DEVICEROUTESOURCEADDRESS` | IPv4 address to use as the source hint on device routes programmed by Felix. [Default: No source hint is set on programmed routes and for local traffic from host to workload the source address will be chosen by the kernel.] | `<IPv4 address>` |
-| `DeviceRouteSourceAddressIPv6` | `FELIX_DEVICEROUTESOURCEADDRESSIPV6` | IPv6 address to use as the source hint on device routes programmed by Felix. [Default: No source hint is set on programmed routes and for local traffic from host to workload the source address will be chosen by the kernel.] | `<IPv6 address>` |
-| `DeviceRouteProtocol` | `FELIX_DEVICEROUTEPROTOCOL` | This defines the route protocol added to programmed device routes. [Default: `RTPROT_BOOT`] | int |
-| `DisableConntrackInvalidCheck` | `FELIX_DISABLECONNTRACKINVALIDCHECK` | Disable the dropping of packets that aren't either a valid handshake or part of an established connection. [Default: `false`] | boolean |
-| `EndpointReportingDelaySecs` | `FELIX_ENDPOINTREPORTINGDELAYSECS` | Set the endpoint reporting delay between status check intervals, in seconds. Only used if endpoint reporting is enabled. [Default: `1`] | int |
-| `EndpointReportingEnabled` | `FELIX_ENDPOINTREPORTINGENABLED` | Enable the endpoint status reporter. [Default: `false`] | boolean |
-| `ExternalNodesCIDRList` | `FELIX_EXTERNALNODESCIDRLIST` | Comma-delimited list of IPv4 addresses or CIDRs of external, non-{{prodname}} nodes from which IPIP traffic is accepted by {{prodname}} nodes. [Default: ""] | string |
-| `FailsafeInboundHostPorts` | `FELIX_FAILSAFEINBOUNDHOSTPORTS` | List of PortProto struct objects including UDP/TCP/SCTP ports and CIDRs that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults to `"tcp"`. If a CIDR is not specified, it will allow traffic from all addresses. To disable all inbound host ports, use the value `[]`. The default value allows ssh access, DHCP, BGP, etcd and the Kubernetes API. [Default: `[{"port":22,"protocol":"tcp"},{"port":68,"protocol":"udp"},{"port":179,"protocol":"tcp"},{"port":2379,"protocol":"tcp"}, {"port":2380,"protocol":"tcp"}, {"port":5473,"protocol":"tcp"}, {"port":6443,"protocol":"tcp"}, {"port":6666,"protocol":"tcp"}, {"port":6667,"protocol":"tcp"}]`] | list |
-| `FailsafeOutboundHostPorts` | `FELIX_FAILSAFEOUTBOUNDHOSTPORTS` | List of PortProto struct objects including UDP/TCP/SCTP ports and CIDRs that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. For backwards compatibility, if the protocol is not specified, it defaults to `"tcp"`. If a CIDR is not specified, it will allow traffic from all addresses. To disable all outbound host ports, use the value `[]`. The default value opens etcd's standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP, DNS, BGP and the Kubernetes API. [Default: `[{"port":53,"protocol":"udp"},{"port":67,"protocol":"udp"}, {"port":179,"protocol":"tcp"}, {"port":2379,"protocol":"tcp"}, {"port":2380,"protocol":"tcp"}, {"port":5473,"protocol":"tcp"}, {"port":6443,"protocol":"tcp"}, {"port": 6666,"protocol":"tcp"}, {"port":6667,"protocol":"tcp"}]`] | list |
-| `FelixHostname` | `FELIX_FELIXHOSTNAME` | The hostname Felix reports to the plugin. Should be used if the hostname Felix autodetects is incorrect or does not match what the plugin will expect. [Default: `socket.gethostname()`] | string |
-| `HealthEnabled` | `FELIX_HEALTHENABLED` | When enabled, exposes Felix health information via an HTTP endpoint. | boolean |
-| `HealthHost` | `FELIX_HEALTHHOST` | The address on which Felix will respond to health requests. [Default: `localhost`] | string |
-| `HealthPort` | `FELIX_HEALTHPORT` | The port on which Felix will respond to health requests. [Default: `9099`] | int |
-| `HealthTimeoutOverrides` | `FELIX_HEALTHTIMEOUTOVERRIDES` | Allows the internal watchdog timeouts of individual subcomponents to be overridden; example: "InternalDataplaneMainLoop=30s,CalculationGraph=2m". This is useful for working around "false positive" liveness timeouts that can occur in particularly stressful workloads or if CPU is constrained. For a list of active subcomponents, see Felix's logs. [Default: ``] | Comma-delimited list of key/value pairs where the values are durations: `1s`, `10s`, `5m`, etc. |
-| `IpInIpEnabled` | `FELIX_IPINIPENABLED` | Optional, you shouldn't need to change this setting as Felix calculates if IPIP should be enabled based on the existing IP Pools. When set, this overrides whether Felix should configure an IPinIP interface on the host. When explicitly disabled in FelixConfiguration, Felix will not clean up addresses from the `tunl0` interface (use this if you need to add addresses to that interface and don't want to have them removed). [Default: unset] | optional boolean |
-| `IpInIpMtu` | `FELIX_IPINIPMTU` | The MTU to set on the IPIP tunnel device. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). [Default: `0`] | int |
-| `IPv4VXLANTunnelAddr` | | IP address of the IPv4 VXLAN tunnel. This is system configured and should not be updated manually. | string |
-| `LogFilePath` | `FELIX_LOGFILEPATH` | The full path to the Felix log. Set to `none` to disable file logging. [Default: `/var/log/calico/felix.log`] | string |
-| `LogSeverityFile` | `FELIX_LOGSEVERITYFILE` | The log severity above which logs are sent to the log file. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` |
-| `LogSeverityScreen` | `FELIX_LOGSEVERITYSCREEN` | The log severity above which logs are sent to stdout. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` |
-| `LogSeveritySys` | `FELIX_LOGSEVERITYSYS` | The log severity above which logs are sent to the syslog. Set to `none` for no logging to syslog. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` |
-| `LogDebugFilenameRegex` | `FELIX_LOGDEBUGFILENAMEREGEX` | Controls which source code files have their Debug log output included in the logs. Only logs from files with names that match the given regular expression are included. The filter only applies to Debug level logs. [Default: `""`] | regex |
-| `PolicySyncPathPrefix` | `FELIX_POLICYSYNCPATHPREFIX` | File system path where Felix notifies services of policy changes over Unix domain sockets. This is only required if you're configuring [application layer policy](https://github.com/projectcalico/app-policy). Set to `""` to disable. [Default: `""`] | string |
-| `PrometheusGoMetricsEnabled` | `FELIX_PROMETHEUSGOMETRICSENABLED` | Set to `false` to disable Go runtime metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean |
-| `PrometheusMetricsEnabled` | `FELIX_PROMETHEUSMETRICSENABLED` | Set to `true` to enable the Prometheus metrics server in Felix. [Default: `false`] | boolean |
-| `PrometheusMetricsHost` | `FELIX_PROMETHEUSMETRICSHOST` | TCP network address that the Prometheus metrics server should bind to. [Default: `""`] | string |
-| `PrometheusMetricsPort` | `FELIX_PROMETHEUSMETRICSPORT` | TCP port that the Prometheus metrics server should bind to. [Default: `9091`] | int |
-| `PrometheusProcessMetricsEnabled` | `FELIX_PROMETHEUSPROCESSMETRICSENABLED` | Set to `false` to disable process metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean |
-| `PrometheusWireGuardMetricsEnabled` | `FELIX_PROMETHEUSWIREGUARDMETRICSENABLED` | Set to `false` to disable WireGuard device metrics collection, which Felix does by default. [Default: `true`] | boolean |
-| `RemoveExternalRoutes` | `FELIX_REMOVEEXTERNALROUTES` | Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications to also add device routes. [Default: `true`] | boolean |
-| `ReportingIntervalSecs` | `FELIX_REPORTINGINTERVALSECS` | Interval at which Felix reports its status into the datastore. 0 means disabled and is correct for Kubernetes-only clusters. Must be non-zero in OpenStack deployments. [Default: `30`] | int |
-| `ReportingTTLSecs` | `FELIX_REPORTINGTTLSECS` | Time-to-live setting for process-wide status reports. [Default: `90`] | int |
-| `RouteTableRange` | `FELIX_ROUTETABLERANGE` | _Deprecated in favor of `RouteTableRanges`._ Calico programs additional Linux route tables for various purposes. `RouteTableRange` specifies the indices of the route tables that Calico should use. [Default: `""`] | `<min>-<max>` |
-| `RouteTableRanges` | `FELIX_ROUTETABLERANGES` | Calico programs additional Linux route tables for various purposes. `RouteTableRanges` specifies a set of table index ranges that Calico should use. Deprecates `RouteTableRange`, overrides `RouteTableRange`. [Default: `"1-250"`] | `<min>-<max>,<min>-<max>,...` |
-| `RouteSyncDisabled` | `FELIX_ROUTESYNCDISABLED` | Set to `true` to disable Calico programming routes to local workloads. [Default: `false`] | boolean |
-| `SidecarAccelerationEnabled` | `FELIX_SIDECARACCELERATIONENABLED` | Enable experimental acceleration between application and proxy sidecar when using [application layer policy](../../network-policy/istio/app-layer-policy.mdx). [Default: `false`] | boolean |
-| `UsageReportingEnabled` | `FELIX_USAGEREPORTINGENABLED` | Reports anonymous {{prodname}} version number and cluster size to projectcalico.org. Logs warnings returned by the usage server. For example, if a significant security vulnerability has been discovered in the version of {{prodname}} being used. [Default: `true`] | boolean |
-| `UsageReportingInitialDelaySecs` | `FELIX_USAGEREPORTINGINITIALDELAYSECS` | Minimum delay before first usage report, in seconds. [Default: `300`] | int |
-| `UsageReportingIntervalSecs` | `FELIX_USAGEREPORTINGINTERVALSECS` | Interval at which to make usage reports, in seconds. [Default: `86400`] | int |
-| `VXLANEnabled` | `FELIX_VXLANENABLED` | Optional, you shouldn't need to change this setting as Felix calculates if VXLAN should be enabled based on the existing IP Pools. When set, this overrides whether Felix should create the VXLAN tunnel device for VXLAN networking. [Default: unset] | optional boolean |
-| `VXLANMTU` | `FELIX_VXLANMTU` | The MTU to set on the IPv4 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. See [Configuring MTU](../../networking/configuring/mtu.mdx). [Default: `0`] | int |
-| `VXLANMTUV6` | `FELIX_VXLANMTUV6` | The MTU to set on the IPv6 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. See [Configuring MTU](../../networking/configuring/mtu.mdx). [Default: `0`] | int |
-| `VXLANPort` | `FELIX_VXLANPORT` | The UDP port to use for VXLAN. [Default: `4789`] | int |
-| `VXLANTunnelMACAddr` | | MAC address of the IPv4 VXLAN tunnel. This is system configured and should not be updated manually. | string |
-| `VXLANVNI` | `FELIX_VXLANVNI` | The virtual network ID to use for VXLAN. [Default: `4096`] | int |
-| `AllowVXLANPacketsFromWorkloads` | `FELIX_ALLOWVXLANPACKETSFROMWORKLOADS` | Set to `true` to allow VXLAN encapsulated traffic from workloads. [Default: `false`] | boolean |
-| `AllowIPIPPacketsFromWorkloads` | `FELIX_ALLOWIPIPPACKETSFROMWORKLOADS` | Set to `true` to allow IPIP encapsulated traffic from workloads. [Default: `false`] | boolean |
-| `TyphaAddr` | `FELIX_TYPHAADDR` | IPv4 address at which Felix should connect to Typha. [Default: none] | string |
-| `TyphaK8sServiceName` | `FELIX_TYPHAK8SSERVICENAME` | Name of the Typha Kubernetes service. | string |
-| `Ipv6Support` | `FELIX_IPV6SUPPORT` | Enable {{prodname}} networking and security for IPv6 traffic as well as for IPv4. | boolean |
-| `RouteSource` | `FELIX_ROUTESOURCE` | Where Felix gets its routing information from for VXLAN and the BPF dataplane. The CalicoIPAM setting is more efficient because it supports route aggregation, but it only works when Calico's IPAM or host-local IPAM is in use. Use the WorkloadIPs setting if you are using Calico's VXLAN or BPF dataplane and not using Calico IPAM or host-local IPAM. [Default: "CalicoIPAM"] | 'CalicoIPAM' or 'WorkloadIPs' |
-| `mtuIfacePattern` | `FELIX_MTUIFACEPATTERN` | Pattern used to discover the host's interface for MTU auto-detection. [Default: `^((en\|wl\|ww\|sl\|ib)[copsvx].*\|(eth\|wlan\|wwan).*)`] | regex |
-| `FeatureDetectOverride` | `FELIX_FEATUREDETECTOVERRIDE` | Used to override feature detection. Values are specified in a comma-separated list with no spaces, for example: "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=true,IPIPDeviceIsL3=true". "true" or "false" forces the feature on or off; empty or omitted values are auto-detected. [Default: `""`] | string |
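-
-For example, several of the parameters above could be set either as environment
-variables or in the Felix configuration file; a sketch (the values are
-arbitrary, and the file path is the one referenced elsewhere in these docs):
-
-```bash
-# Environment variables take precedence over every other location.
-export FELIX_LOGSEVERITYSCREEN=Debug
-export FELIX_PROMETHEUSMETRICSENABLED=true
-
-# The same settings in the Felix configuration file.
-cat <<EOF >> /etc/calico/felix.cfg
-LogSeverityScreen = Debug
-PrometheusMetricsEnabled = true
-EOF
-```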
-
-### etcd datastore configuration
-
-| Configuration parameter | Environment variable | Description | Schema |
-| --- | --- | --- | --- |
-| `EtcdCaFile` | `FELIX_ETCDCAFILE` | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures Felix to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing Felix to trust each of the CAs included. To disable authentication of the server by Felix, set the value to `none`. [Default: `/etc/ssl/certs/ca-certificates.crt`] | string |
-| `EtcdCertFile` | `FELIX_ETCDCERTFILE` | Path to the file containing the client certificate issued to Felix. Enables Felix to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/felix/cert.pem` (optional) | string |
-| `EtcdEndpoints` | `FELIX_ETCDENDPOINTS` | Comma-delimited list of etcd endpoints to connect to. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379`. | `<scheme>://<IP>:<port>` |
-| `EtcdKeyFile` | `FELIX_ETCDKEYFILE` | Path to the file containing the private key matching Felix's client certificate. Enables Felix to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/felix/key.pem` (optional) | string |
-
-### Kubernetes API datastore configuration
-
-The Kubernetes API datastore driver reads its configuration from Kubernetes-provided environment variables.
-
-### iptables dataplane configuration
-
-| Configuration parameter | Environment variable | Description | Schema |
-| --- | --- | --- | --- |
-| `ChainInsertMode` | `FELIX_CHAININSERTMODE` | Controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. `Insert` is the safe default since it prevents {{prodname}}'s rules from being bypassed. If you switch to `Append` mode, be sure that the other rules in the chains signal acceptance by falling through to the {{prodname}} rules, otherwise the {{prodname}} policy will be bypassed. [Default: `Insert`] | `Insert`, `Append` |
-| `DefaultEndpointToHostAction` | `FELIX_DEFAULTENDPOINTTOHOSTACTION` | This parameter controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default {{prodname}} blocks traffic from workload endpoints to the host itself with an iptables `Drop` action. If you want to allow some or all traffic from endpoint to host, set this parameter to `Return` or `Accept`. Use `Return` if you have your own rules in the iptables "INPUT" chain; {{prodname}} will insert its rules at the top of that chain, then `Return` packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use `Accept` to unconditionally accept packets from workloads after processing workload endpoint egress policy. [Default: `Drop`] | `Drop`, `Return`, `Accept` |
-| `GenericXDPEnabled` | `FELIX_GENERICXDPENABLED` | When enabled, Felix can fall back to the non-optimized `generic` XDP mode. This should only be used for testing since it doesn't improve performance over the non-XDP mode. [Default: `false`] | boolean |
-| `InterfaceExclude` | `FELIX_INTERFACEEXCLUDE` | A comma-separated list of interface names that should be excluded when Felix is resolving host endpoints. The default value ensures that Felix ignores Kubernetes' internal `kube-ipvs0` device. If you want to exclude multiple interface names using a single value, the list supports regular expressions. For regular expressions you must wrap the value with `/`. For example, having values `/^kube/,veth1` will exclude all interfaces that begin with `kube` and also the interface `veth1`. [Default: `kube-ipvs0`] | string |
-| `IpsetsRefreshInterval` | `FELIX_IPSETSREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks the IP sets in the dataplane to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the other refresh intervals as a workaround for a [Linux kernel bug](https://bugzilla.netfilter.org/show_bug.cgi?id=1119) that was fixed in kernel version 4.11. If you are using v4.11 or greater, you may want to set this to a higher value to reduce Felix CPU usage. [Default: `10`] | int |
-| `IptablesBackend` | `FELIX_IPTABLESBACKEND` | This parameter controls which variant of iptables binary Felix uses. Set this to `Auto` for auto detection of the backend. If a specific backend is needed then use `NFT` for hosts using a netfilter backend or `Legacy` for others. [Default: `Auto`] | `Legacy`, `NFT`, `Auto` |
-| `IptablesFilterAllowAction` | `FELIX_IPTABLESFILTERALLOWACTION` | This parameter controls what happens to traffic that is allowed by a Felix policy chain in the iptables filter table (i.e., a normal policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. [Default: `Accept`] | `Accept`, `Return` |
-| `IptablesLockFilePath` | `FELIX_IPTABLESLOCKFILEPATH` | _Deprecated:_ For iptables versions prior to v1.6.2, location of the iptables lock file (later versions of iptables always use value "/run/xtables.lock"). You may need to change this if the lock file is not in its standard location (for example if you have mapped it into Felix's container at a different path). [Default: `/run/xtables.lock`] | string |
-| `IptablesLockProbeIntervalMillis` | `FELIX_IPTABLESLOCKPROBEINTERVALMILLIS` | Time, in milliseconds, that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. [Default: `50`] | int |
-| `IptablesLockTimeoutSecs` | `FELIX_IPTABLESLOCKTIMEOUTSECS` | Time, in seconds, that Felix will wait for the iptables lock. Versions of iptables prior to v1.6.2 support disabling the iptables lock by setting this value to 0; v1.6.2 and above do not, so Felix will default to 10s if a non-positive number is used. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this typically requires the file /run/xtables.lock on the host to be mounted into the `{{nodecontainer}}` or `calico/felix` container. [Default: `0` disabled for iptables <v1.6.2 or 10s for later versions] | int |
-| `IptablesMangleAllowAction` | `FELIX_IPTABLESMANGLEALLOWACTION` | This parameter controls what happens to traffic that is allowed by a Felix policy chain in the iptables mangle table (i.e., a pre-DNAT policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. [Default: `Accept`] | `Accept`, `Return` |
-| `IptablesMarkMask` | `FELIX_IPTABLESMARKMASK` | Mask that Felix selects its iptables mark bits from. Should be a 32-bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. When using {{prodname}} with Kubernetes' `kube-proxy` in IPVS mode, [we recommend allowing at least 16 bits](#ipvs-bits). [Default: `0xffff0000`] | netmask |
-| `IptablesNATOutgoingInterfaceFilter` | `FELIX_IPTABLESNATOUTGOINGINTERFACEFILTER` | This parameter can be used to limit the host interfaces on which Calico will apply SNAT to traffic leaving a Calico IPAM pool with "NAT outgoing" enabled. This can be useful if you have a main data interface, where traffic should be SNATted, and a secondary device (such as the docker bridge) which is local to the host and doesn't require SNAT. This parameter uses the iptables interface matching syntax, which allows `+` as a wildcard. Most users will not need to set this. Example: if your data interfaces are eth0 and eth1 and you want to exclude the docker bridge, you could set this to `eth+` | string |
-| `IptablesPostWriteCheckIntervalSecs` | `FELIX_IPTABLESPOSTWRITECHECKINTERVALSECS` | Period, in seconds, after Felix has done a write to the dataplane that it schedules an extra read back to check the write was not clobbered by another process. This should only occur if another application on the system doesn't respect the iptables lock. [Default: `1`] | int |
-| `IptablesRefreshInterval` | `FELIX_IPTABLESREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks all iptables state to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable iptables refresh. [Default: `90`] | int |
-| `LogPrefix` | `FELIX_LOGPREFIX` | The log prefix that Felix uses when rendering LOG rules. [Default: `calico-packet`] | string |
-| `MaxIpsetSize` | `FELIX_MAXIPSETSIZE` | Maximum size for the ipsets used by Felix. Should be set to a number that is greater than the maximum number of IP addresses that are ever expected in a selector. [Default: `1048576`] | int |
-| `NATPortRange` | `FELIX_NATPORTRANGE` | Port range used by iptables for port mapping when doing outgoing NAT. (Example: `32768:65000`). [Default: iptables maps source ports below 512 to other ports below 512: those between 512 and 1023 inclusive will be mapped to ports below 1024, and other ports will be mapped to 1024 or above. Where possible, no port alteration will occur.] | string |
-| `NATOutgoingAddress` | `FELIX_NATOUTGOINGADDRESS` | Source address used by iptables for an SNAT rule when doing outgoing NAT. [Default: an iptables `MASQUERADE` rule is used for outgoing NAT which will use the address on the interface traffic is leaving on.] | `<IPv4 address>` |
-| `NetlinkTimeoutSecs` | `FELIX_NETLINKTIMEOUTSECS` | Time, in seconds, that Felix will wait for netlink (i.e. routing table list/update) operations to complete before giving up and retrying. [Default: `10`] | float |
-| `RouteRefreshInterval` | `FELIX_ROUTEREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable route refresh. [Default: `90`] | int |
-| `ServiceLoopPrevention` | `FELIX_SERVICELOOPPREVENTION` | When [service IP advertisement is enabled](../../networking/configuring/advertise-service-ips.mdx), prevent routing loops to service IPs that are not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy. Unless set to "Disabled", in which case such routing loops continue to be allowed. [Default: `Drop`] | `Drop`, `Reject`, `Disabled` |
-| `WorkloadSourceSpoofing` | `FELIX_WORKLOADSOURCESPOOFING` | Controls whether pods can enable source IP address spoofing with the `cni.projectcalico.org/allowedSourcePrefixes` annotation. When set to `Any`, pods can use this annotation to send packets from any IP address. [Default: `Disabled`] | `Any`, `Disabled` |
-| `XDPRefreshInterval` | `FELIX_XDPREFRESHINTERVAL` | Period, in seconds, at which Felix re-checks the XDP state in the dataplane to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable XDP refresh. [Default: `90`] | int |
-| `XDPEnabled` | `FELIX_XDPENABLED` | Enable XDP acceleration for host endpoint policies. [Default: `true`] | boolean |
-
-### eBPF dataplane configuration
-
-eBPF dataplane mode uses the Linux kernel's eBPF virtual machine to implement networking and policy instead of iptables. When `BPFEnabled` is set to `true`, Felix will:
-
-- Require a v5.3 Linux kernel.
-- Implement policy with eBPF programs instead of iptables.
-- Activate its embedded implementation of `kube-proxy` to implement Kubernetes service load balancing.
-- Disable support for IPv6.
-
-See the [HOWTO guide](../../operations/ebpf/enabling-ebpf.mdx) for step-by-step instructions to enable this feature.
-
-| Configuration parameter / Environment variable | Description | Schema | Default |
-| --- | --- | --- | --- |
| Configuration parameter / Environment variable | Description | Schema | Default |
| ----------------------------------------------- | ----------- | ------ | ------- |
| BPFEnabled /<br/>FELIX_BPFENABLED | Enable eBPF dataplane mode. eBPF mode has a number of limitations, see the [HOWTO guide](../../operations/ebpf/enabling-ebpf.mdx). | true, false | false |
| BPFDisableUnprivileged /<br/>FELIX_BPFDISABLEUNPRIVILEGED | If true, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and cannot insert their own BPF programs to interfere with the ones that {{prodname}} installs. | true, false | true |
| BPFLogLevel /<br/>FELIX_BPFLOGLEVEL | The log level used by the BPF programs. The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. | Off, Info, Debug | Off |
| BPFDataIfacePattern /<br/>FELIX_BPFDATAIFACEPATTERN | Controls which interfaces Felix should attach BPF programs to in order to catch traffic to/from the external network. This needs to match the interfaces that Calico workload traffic flows over, as well as any interfaces that handle incoming traffic to NodePorts and services from outside the cluster. It should not match the workload interfaces (usually named cali...). | regular expression | `^((en\|wl\|ww\|sl\|ib)[Popsvx].*\|(eth\|wlan\|wwan).*\|tunl0$\|vxlan.calico$\|wireguard.cali$\|wg-v6.cali$)` |
| BPFL3IfacePattern /<br/>FELIX_BPFL3IFACEPATTERN | Allows tunnel devices like wireguard or vxlan (i.e., L3 devices) to be listed in addition to BPFDataIfacePattern. That is, tunnel interfaces not created by Calico that Calico workload traffic flows over, as well as any interfaces that handle incoming traffic to NodePorts and services from outside the cluster. | regular expression | "" |
| BPFConnectTimeLoadBalancingEnabled /<br/>FELIX_BPFCONNECTTIMELOADBALANCINGENABLED | Controls whether Felix installs the connect-time load balancer. In the current release, the connect-time load balancer is required for the host to reach Kubernetes services. | true, false | true |
| BPFExternalServiceMode /<br/>FELIX_BPFEXTERNALSERVICEMODE | Controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled. In Tunnel mode, packets are tunneled from the ingress host to the host with the backing pod and back again. In DSR mode, traffic is tunneled to the host with the backing pod and then returned directly; this requires a network that allows direct return. | Tunnel, DSR | Tunnel |
| BPFExtToServiceConnmark /<br/>FELIX_BPFEXTTOSERVICECONNMARK | Controls a 32-bit mark that is set on connections from an external client to a local service. This mark allows us to control how packets of that connection are routed within the host and how routing is interpreted by the RPF check. | int | 0 |
| BPFEnforceRPF /<br/>FELIX_BPFENFORCERPF | Enforce RPF on all host interfaces with BPF programs, regardless of the per-interface or global setting. | Disabled, Strict, Loose | Strict |
| BPFKubeProxyIptablesCleanupEnabled /<br/>FELIX_BPFKUBEPROXYIPTABLESCLEANUPENABLED | Controls whether Felix will clean up the iptables rules created by the Kubernetes `kube-proxy`; should only be enabled if `kube-proxy` is not running. | true, false | true |
| BPFKubeProxyMinSyncPeriod /<br/>FELIX_BPFKUBEPROXYMINSYNCPERIOD | Controls the minimum time between dataplane updates for Felix's embedded `kube-proxy` implementation. | seconds | `1` |
| BPFKubeProxyEndpointSlicesEnabled /<br/>FELIX_BPFKUBEPROXYENDPOINTSLICESENABLED | Controls whether Felix's embedded kube-proxy derives its services from Kubernetes' EndpointSlices resources. Using EndpointSlices is more efficient, but it requires EndpointSlices support to be enabled at the Kubernetes API server. | true, false | false |
| BPFMapSizeConntrack /<br/>FELIX_BPFMAPSIZECONNTRACK | Controls the size of the conntrack map. This map must be large enough to hold an entry for each active connection. Warning: changing the size of the conntrack map can cause disruption. | int | 512000 |
| BPFMapSizeNATFrontend /<br/>FELIX_BPFMAPSIZENATFRONTEND | Controls the size of the NAT frontend map. The frontend map should be large enough to hold an entry for each NodePort, external IP, and each port in each service. | int | 65536 |
| BPFMapSizeNATBackend /<br/>FELIX_BPFMAPSIZENATBACKEND | Controls the size of the NAT backend map. This is the total number of service endpoints, which is usually larger than the number of services. | int | 262144 |
| BPFMapSizeNATAffinity /<br/>FELIX_BPFMAPSIZENATAFFINITY | Controls the size of the NAT affinity map. | int | 65536 |
| BPFMapSizeIPSets /<br/>FELIX_BPFMAPSIZEIPSETS | Controls the size of the IP sets map. The IP sets map must be large enough to hold an entry for each endpoint matched by every selector in the source/destination matches in network policy. Selectors such as "all()" can result in large numbers of entries (one entry per endpoint in that case). | int | 1048576 |
| BPFMapSizeRoute /<br/>FELIX_BPFMAPSIZEROUTE | Controls the size of the route map. The routes map should be large enough to hold one entry per workload and a handful of entries per host (enough to cover its own IPs and tunnel IPs). | int | 262144 |
| BPFHostConntrackBypass /<br/>FELIX_BPFHOSTCONNTRACKBYPASS | Controls whether to bypass Linux conntrack in BPF mode for workloads and services. | true, false | true |
| BPFPolicyDebugEnabled /<br/>FELIX_BPFPOLICYDEBUGENABLED | In eBPF dataplane mode, Felix records detailed information about the BPF policy programs, which can be examined with the `calico-bpf` command-line tool. | true, false | true |
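As a sketch of what inspecting that state can look like, the `calico-bpf` tool can be run via the `calico-node` binary inside a {{prodname}} node pod; the namespace, pod name, and exact invocation below are illustrative and may differ by install:

```bash
# Dump the BPF conntrack table on one node (pod name illustrative).
kubectl exec -n kube-system calico-node-abcd1 -- calico-node -bpf conntrack dump
```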
### Kubernetes-specific configuration

| Configuration parameter | Environment variable | Description | Schema |
| ----------------------- | -------------------- | ----------- | ------ |
| `KubeNodePortRanges` | `FELIX_KUBENODEPORTRANGES` | A list of port ranges that Felix should treat as Kubernetes node ports. Only used when `kube-proxy` is configured to use IPVS mode: Felix assumes that traffic arriving at the host on one of these ports will ultimately be forwarded, instead of being terminated by a host process. [Default: `30000:32767`] | Comma-delimited list of `<min>:<max>` port ranges or single ports. |

:::note

When using {{prodname}} with Kubernetes' `kube-proxy` in IPVS mode, {{prodname}} uses additional iptables mark bits to store an ID for each local {{prodname}} endpoint. For example, the default `IptablesMarkMask` value, `0xffff0000`, gives {{prodname}} 16 bits, up to 6 of which are used for internal purposes, leaving 10 bits for endpoint IDs. 10 bits is enough for 1024 different values, and {{prodname}} uses 2 of those for internal purposes, leaving enough for 1022 endpoints on the host.

:::
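If your cluster uses a non-default NodePort range, the matching Felix setting can be applied through the FelixConfiguration resource; a minimal sketch (the range value is illustrative):

```yaml
apiVersion: projectcalico.org/v3
kind: FelixConfiguration
metadata:
  name: default
spec:
  kubeNodePortRanges:
    - "30000:32767"
```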
### OpenStack-specific configuration

| Configuration parameter | Environment variable | Description | Schema |
| ----------------------- | -------------------- | ----------- | ------ |
| `MetadataAddr` | `FELIX_METADATAADDR` | The IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of `none` (case-insensitive) means that Felix should not set up any NAT rule for the metadata path. [Default: `127.0.0.1`] | `<IPv4-address>`, `<hostname>`, `none` |
| `MetadataPort` | `FELIX_METADATAPORT` | The port of the metadata server. This, combined with global.MetadataAddr (if not 'None'), is used to set up a NAT rule from 169.254.169.254:80 to MetadataAddr:MetadataPort. In most cases this should not need to be changed. [Default: `8775`] | int |
| `OpenStackRegion` | `FELIX_OPENSTACKREGION` | In a [multi-region deployment](../../networking/openstack/multiple-regions.mdx), the name of the region that this Felix is in. [Default: none] | string\* |

\* If non-empty, the value specified for `OpenStackRegion` must be a string of lower-case alphanumeric characters or '-', starting and ending with an alphanumeric character.

### Bare metal specific configuration

| Configuration parameter | Environment variable | Description | Schema |
| ----------------------- | -------------------- | ----------- | ------ |
| `InterfacePrefix` | `FELIX_INTERFACEPREFIX` | The interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Accepts more than one interface name prefix in comma-delimited format, e.g., `tap,cali`. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example, our Kubernetes and Docker integrations set the `cali` value, and our OpenStack integration sets the `tap` value. [Default: `cali`] | string |

### Felix-Typha configuration

| Configuration parameter | Environment variable | Description | Schema |
| ----------------------- | -------------------- | ----------- | ------ |
| `TyphaAddr` | `FELIX_TYPHAADDR` | Address of the Typha server when running outside a Kubernetes cluster, in the format IP:PORT. | string |
| `TyphaK8sServiceName` | `FELIX_TYPHAK8SSERVICENAME` | Service name of the Typha Deployment when running inside a Kubernetes cluster. | string |
| `TyphaK8sNamespace` | `FELIX_TYPHAK8SNAMESPACE` | Namespace of the Typha Deployment when running inside a Kubernetes cluster. [Default: `kube-system`] | string |
| `TyphaReadTimeout` | `FELIX_TYPHAREADTIMEOUT` | Timeout, in seconds, for Felix when reading information from Typha. [Default: 30] | int |
| `TyphaWriteTimeout` | `FELIX_TYPHAWRITETIMEOUT` | Timeout, in seconds, for Felix when writing information to Typha. [Default: 30] | int |

### Felix-Typha TLS configuration

| Configuration parameter | Environment variable | Description | Schema |
| ----------------------- | -------------------- | ----------- | ------ |
| `TyphaCAFile` | `FELIX_TYPHACAFILE` | Path to the file containing the root certificate of the CA that issued the Typha server certificate. Configures Felix to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing Felix to trust each of the CAs included. Example: `/etc/felix/ca.pem` | string |
| `TyphaCertFile` | `FELIX_TYPHACERTFILE` | Path to the file containing the client certificate issued to Felix. Enables Felix to participate in mutual TLS authentication and identify itself to the Typha server. Example: `/etc/felix/cert.pem` | string |
| `TyphaCN` | `FELIX_TYPHACN` | If set, the `Common Name` that Typha's certificate must have. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `TyphaURISAN`. You can also set values in both, for example to facilitate a migration from using one to the other. If either matches, the communication succeeds. [Default: none] | string |
| `TyphaKeyFile` | `FELIX_TYPHAKEYFILE` | Path to the file containing the private key matching the Felix client certificate. Enables Felix to participate in mutual TLS authentication and identify itself to the Typha server. Example: `/etc/felix/key.pem` (optional) | string |
| `TyphaURISAN` | `FELIX_TYPHAURISAN` | If set, a URI SAN that Typha's certificate must have. We recommend populating this with a [SPIFFE](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#2-spiffe-identity) string that identifies Typha. All Typha instances should use the same SPIFFE ID. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `TyphaCN`. You can also set values in both, for example to facilitate a migration from using one to the other. If either matches, the communication succeeds. [Default: none] | string |
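Putting the TLS settings together, the environment for a Felix instance doing mutual TLS to Typha might look like the following sketch (the file paths are the examples from the table; the SPIFFE ID is illustrative):

```bash
export FELIX_TYPHACAFILE=/etc/felix/ca.pem
export FELIX_TYPHACERTFILE=/etc/felix/cert.pem
export FELIX_TYPHAKEYFILE=/etc/felix/key.pem
export FELIX_TYPHAURISAN=spiffe://example.org/typha  # illustrative SPIFFE ID
```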
For more information on how to use and set these variables, refer to [Connections from Felix to Typha (Kubernetes)](../../network-policy/comms/crypto-auth.mdx#connections-from-felix-to-typha-kubernetes).

### WireGuard configuration

| Configuration parameter | Description | Values | Schema | Default |
| ----------------------- | ----------- | ------ | ------ | ------- |
| wireguardEnabled | Enable encryption for IPv4 on WireGuard-supported nodes in the cluster. When enabled, pod-to-pod traffic is sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` |
| wireguardEnabledV6 | Enable encryption for IPv6 on WireGuard-supported nodes in the cluster. When enabled, pod-to-pod traffic is sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` |
| wireguardInterfaceName | Name of the IPv4 WireGuard interface created by Felix. If you change the name and want to clean up the previously-configured interface names on each node, this is a manual process. | string | string | wireguard.cali |
| wireguardInterfaceNameV6 | Name of the IPv6 WireGuard interface created by Felix. If you change the name and want to clean up the previously-configured interface names on each node, this is a manual process. | string | string | wg-v6.cali |
| wireguardListeningPort | Port used by IPv4 WireGuard tunnels. Felix sets up an IPv4 WireGuard tunnel on each node using this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, in the config file, or via environment variable will not work. | 1-65535 | int | 51820 |
| wireguardListeningPortV6 | Port used by IPv6 WireGuard tunnels. Felix sets up an IPv6 WireGuard tunnel on each node using this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, in the config file, or via environment variable will not work. | 1-65535 | int | 51821 |
| wireguardMTU | MTU set on the IPv4 WireGuard interface created by Felix. A zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). | int | int | 0 |
| wireguardMTUV6 | MTU set on the IPv6 WireGuard interface created by Felix. A zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). | int | int | 0 |
| wireguardRoutingRulePriority | WireGuard routing rule priority value set up by Felix. If you change the default value, set it to the value most appropriate to the routing rules for your nodes. | 1-32765 | int | 99 |
| wireguardHostEncryptionEnabled | **Experimental**: Adds host-namespace workload IPs to WireGuard's list of peers. Should **not** be enabled when WireGuard is enabled on a cluster's control plane node, as a networking deadlock can occur. | true, false | boolean | false |
| wireguardKeepAlive | Controls the WireGuard PersistentKeepalive option. Set to 0 to disable. | int | int | 25 |

For more information on encrypting in-cluster traffic with WireGuard, refer to
[Encrypt cluster pod traffic](../../network-policy/encrypt-cluster-pod-traffic.mdx)
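As a sketch, IPv4 WireGuard encryption can be switched on for the whole cluster with a single patch to the default FelixConfiguration:

```bash
# Enable WireGuard encryption for pod-to-pod traffic (IPv4).
calicoctl patch felixconfiguration default --type='merge' -p '{"spec": {"wireguardEnabled": true}}'
```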
## Environment variables

The highest priority of configuration is that read from environment variables. To set a configuration parameter via an environment variable, set the environment variable formed by taking `FELIX_` and appending the uppercase form of the variable name. For example, to set the etcd address, set the environment variable `FELIX_ETCDADDR`. Other examples include `FELIX_ETCDSCHEME`, `FELIX_ETCDKEYFILE`, `FELIX_ETCDCERTFILE`, `FELIX_ETCDCAFILE`, `FELIX_FELIXHOSTNAME`, `FELIX_LOGFILEPATH` and `FELIX_METADATAADDR`.

## Configuration file

On startup, Felix reads an ini-style configuration file. The path to this file defaults to `/etc/calico/felix.cfg` but can be overridden using the `-c` or `--config-file` options on the command line. If the file exists, then it is read (ignoring section names) and all parameters are set from it.

In OpenStack, we recommend putting all configuration into configuration files, since the etcd database is transient (and may be recreated by the OpenStack plugin in certain error cases). However, in a Docker environment the use of environment variables or etcd is often more convenient.

## Datastore

Felix also reads configuration parameters from the datastore. It supports a global setting and a per-host override.

1. Get the current felixconfig settings.

   ```bash
   calicoctl get felixconfig default -o yaml --export > felix.yaml
   ```

1. Modify logFilePath to your intended path, e.g. "/tmp/felix.log".

   ```bash
   vim felix.yaml
   ```

   :::tip

   For a global change, set the name to "default".
   For a node-specific change, set the name to `node.<nodename>`, e.g. "node.{{prodname}}-node-1".

   :::

1. Replace the current felixconfig settings.

   ```bash
   calicoctl replace -f felix.yaml
   ```

For more information, see [Felix Configuration Resource](../resources/felixconfig.mdx).
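For instance, after the edit in step 2 the saved felix.yaml might look like the following minimal sketch (only the changed field is shown; a real exported file contains more fields):

```yaml
apiVersion: projectcalico.org/v3
kind: FelixConfiguration
metadata:
  name: default
spec:
  logFilePath: /tmp/felix.log
```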
diff --git a/calico_versioned_docs/version-3.25/reference/felix/index.mdx b/calico_versioned_docs/version-3.25/reference/felix/index.mdx
deleted file mode 100644
index b7c1e2d1c1..0000000000
--- a/calico_versioned_docs/version-3.25/reference/felix/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
---
description: Felix is a Calico component that runs on every machine that provides endpoints.
hide_table_of_contents: true
---

# Felix

import DocCardList from '@theme/DocCardList';
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';

<DocCardList items={useCurrentSidebarCategory().items} />

diff --git a/calico_versioned_docs/version-3.25/reference/felix/prometheus.mdx b/calico_versioned_docs/version-3.25/reference/felix/prometheus.mdx
deleted file mode 100644
index 728b1e7ed9..0000000000
--- a/calico_versioned_docs/version-3.25/reference/felix/prometheus.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
---
description: Review metrics for the Felix component if you are using Prometheus.
---

# Prometheus metrics

Felix can be configured to report a number of metrics through Prometheus. See the [configuration reference](configuration.mdx) for how to enable metrics reporting.

## Metric reference

#### Felix specific

Felix exports a number of Prometheus metrics. The current set is as follows. Since some metrics are tied to particular implementation choices inside Felix, we can't make any hard guarantees that metrics will persist across releases. However, we aim not to make any spurious changes to existing metrics.

| Name | Description |
| ---- | ----------- |
| `felix_active_local_endpoints` | Number of active endpoints on this host. |
| `felix_active_local_policies` | Number of active policies on this host. |
| `felix_active_local_selectors` | Number of active selectors on this host. |
| `felix_calc_graph_output_events` | Number of events emitted by the calculation graph. |
| `felix_calc_graph_update_time_seconds` | Seconds to update calculation graph for each datastore OnUpdate call. |
| `felix_calc_graph_updates_processed` | Number of datastore updates processed by the calculation graph. |
| `felix_cluster_num_host_endpoints` | Total number of host endpoints cluster-wide. |
| `felix_cluster_num_hosts` | Total number of {{prodname}} hosts in the cluster. |
| `felix_cluster_num_workload_endpoints` | Total number of workload endpoints cluster-wide. |
| `felix_exec_time_micros` | Summary of time taken to fork/exec child processes. |
| `felix_int_dataplane_addr_msg_batch_size` | Number of interface address messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. |
| `felix_int_dataplane_apply_time_seconds` | Time in seconds that it took to apply a dataplane update. |
| `felix_int_dataplane_failures` | Number of times dataplane updates failed and will be retried. |
| `felix_int_dataplane_iface_msg_batch_size` | Number of interface state messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. |
| `felix_int_dataplane_messages` | Number of dataplane messages by type. |
| `felix_int_dataplane_msg_batch_size` | Number of messages processed in each batch. Higher values indicate we're doing more batching to try to keep up. |
| `felix_ipset_calls` | Number of ipset commands executed. |
| `felix_ipset_errors` | Number of ipset command failures. |
| `felix_ipset_lines_executed` | Number of ipset operations executed. |
| `felix_ipsets_calico` | Number of active {{prodname}} IP sets. |
| `felix_ipsets_total` | Total number of active IP sets. |
| `felix_iptables_chains` | Number of active iptables chains. |
| `felix_iptables_lines_executed` | Number of iptables rule updates executed. |
| `felix_iptables_restore_calls` | Number of iptables-restore calls. |
| `felix_iptables_restore_errors` | Number of iptables-restore errors. |
| `felix_iptables_rules` | Number of active iptables rules. |
| `felix_iptables_save_calls` | Number of iptables-save calls. |
| `felix_iptables_save_errors` | Number of iptables-save errors. |
| `felix_resync_state` | Current datastore state. |
| `felix_resyncs_started` | Number of times Felix has started resyncing with the datastore. |
| `felix_route_table_list_seconds` | Time taken to list all the interfaces during a resync. |
| `felix_route_table_per_iface_sync_seconds` | Time taken to sync each interface. |

Prometheus metrics are self-documenting: with metrics turned on, `curl` can be used to list the metrics along with their help text and type information.

```bash
curl -s http://localhost:9091/metrics | head
```

Example response:

```
# HELP felix_active_local_endpoints Number of active endpoints on this host.
# TYPE felix_active_local_endpoints gauge
felix_active_local_endpoints 91
# HELP felix_active_local_policies Number of active policies on this host.
# TYPE felix_active_local_policies gauge
felix_active_local_policies 0
# HELP felix_active_local_selectors Number of active selectors on this host.
# TYPE felix_active_local_selectors gauge
felix_active_local_selectors 82
...
```
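To scrape these metrics continuously, a minimal Prometheus job might look like the following sketch (the target assumes the default metrics port of 9091 on a node; replace with your node addresses):

```yaml
scrape_configs:
  - job_name: 'felix'
    static_configs:
      - targets: ['localhost:9091']  # illustrative target
```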
#### CPU / memory metrics

Felix also exports the default set of metrics that Prometheus makes available. Currently, those include:

| Name | Description |
| ---- | ----------- |
| `go_gc_duration_seconds` | A summary of the GC invocation durations. |
| `go_goroutines` | Number of goroutines that currently exist. |
| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. |
| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. |
| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. |
| `go_memstats_frees_total` | Total number of frees. |
| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. |
| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. |
| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. |
| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. |
| `go_memstats_heap_objects` | Number of allocated objects. |
| `go_memstats_heap_released_bytes_total` | Total number of heap bytes released to OS. |
| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from system. |
| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of last garbage collection. |
| `go_memstats_lookups_total` | Total number of pointer lookups. |
| `go_memstats_mallocs_total` | Total number of mallocs. |
| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. |
| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from system. |
| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. |
| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from system. |
| `go_memstats_next_gc_bytes` | Number of heap bytes when next garbage collection will take place. |
| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. |
| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. |
| `go_memstats_stack_sys_bytes` | Number of bytes obtained from system for stack allocator. |
| `go_memstats_sys_bytes` | Number of bytes obtained by system. Sum of all system allocations. |
| `process_cpu_seconds_total` | Total user and system CPU time spent in seconds. |
| `process_max_fds` | Maximum number of open file descriptors. |
| `process_open_fds` | Number of open file descriptors. |
| `process_resident_memory_bytes` | Resident memory size in bytes. |
| `process_start_time_seconds` | Start time of the process since unix epoch in seconds. |
| `process_virtual_memory_bytes` | Virtual memory size in bytes. |

#### WireGuard metrics

Felix also exports WireGuard device stats if a device is found/detected. This can be disabled via Felix configuration.

| Name | Description |
| ---- | ----------- |
| `wireguard_meta` | Gauge. Device / interface information for a felix/calico node; values are in this metric's labels. |
| `wireguard_bytes_rcvd` | Counter. Current bytes received from a peer, identified by a peer public key and endpoint. |
| `wireguard_bytes_sent` | Counter. Current bytes sent to a peer, identified by a peer public key and endpoint. |
| `wireguard_latest_handshake_seconds` | Gauge. Last handshake with a peer, as a unix timestamp in seconds. |

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/connectivity.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/connectivity.mdx
deleted file mode 100644
index b7a6ff3e8e..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/connectivity.mdx
+++ /dev/null
@@ -1,92 +0,0 @@
---
description: Customize the Calico failsafe policy to protect host endpoints.
---

# Creating policy for basic connectivity

When a host endpoint is added, if there is no security policy for that endpoint, {{prodname}} defaults to denying traffic to/from that endpoint, except for traffic that is allowed by the [failsafe rules](failsafe.mdx).

While the [failsafe rules](failsafe.mdx) provide protection against removing all connectivity to a host:

- They are overly broad in allowing inbound SSH on any interface and allowing traffic out to etcd's ports on any interface.
- Depending on your network, they may not cover all the ports that are required; for example, your network may rely on allowing ICMP or DHCP.

Therefore, we recommend creating a failsafe {{prodname}} security policy that is tailored to your environment. The example command below shows one way of doing that; it uses `calicoctl` to create a single policy resource, which:

- Applies to all known endpoints.
- Allows inbound SSH access from a defined "management" subnet.
- Allows outbound connectivity to etcd on a particular IP; if you have multiple etcd servers, you should duplicate the rule for each destination.
- Allows inbound ICMP.
- Allows outbound UDP on port 67, for DHCP.

When running this command, replace the placeholders in angle brackets with appropriate values for your deployment.
```bash
cat <<EOF | calicoctl create -f -
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: failsafe
spec:
  selector: "all()"
  order: 0
  ingress:
    - action: Allow
      protocol: TCP
      source:
        nets: ["<your management CIDR>"]
      destination:
        ports: [22]
    - action: Allow
      protocol: ICMP
  egress:
    - action: Allow
      protocol: TCP
      destination:
        nets: ["<your etcd IP>/32"]
        ports: [<your etcd ports>]
    - action: Allow
      protocol: TCP
      destination:
        nets: ["<your second etcd IP, if any>"]
    - action: Allow
      protocol: UDP
      destination:
        ports: [67]
EOF
```

Once you have such a policy in place, you may want to disable the [failsafe rules](failsafe.mdx).

:::note

Packets that reach the end of the list of rules fall through to the next policy (sorted by the `order` field).

The selector in the policy, `all()`, will match _all_ endpoints, including any workload endpoints. If you have workload endpoints as well as host endpoints, then you may wish to use a more restrictive selector. For example, you could label management interfaces with label `endpoint_type = management` and then use selector `endpoint_type == "management"`.

If you are using {{prodname}} for networking workloads, you should add inbound and outbound rules to allow BGP: add an ingress and egress rule to allow TCP traffic to destination port 179.

:::

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/conntrack.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/conntrack.mdx
deleted file mode 100644
index f4da1f9f4c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/conntrack.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
---
description: Workaround for Linux conntrack if Calico policy is not working as it should.
---

# Connection tracking

{{prodname}} uses Linux's connection tracking ('conntrack') as an important optimization to its processing. It generally means that {{prodname}} only needs to check its policies for the first packet in an allowed flow—between a pair of IP addresses and ports—and then conntrack automatically allows further packets in the same flow, without {{prodname}} rechecking every packet.

This can, however, make it look like a {{prodname}} policy is not working as it should, if policy is changed to disallow a flow that was previously allowed. If packets were recently exchanged on the previously allowed flow, and so there is conntrack state for that flow that has not yet expired, that conntrack state will allow further packets between the same IP addresses and ports, even after the {{prodname}} policy has been changed.

With {{prodname}}'s current implementation, there are two workarounds for this:

- Somehow ensure that no further packets flow between the relevant IP addresses and ports until the conntrack state has expired (typically about a minute).

- Use the 'conntrack' tool to delete the relevant conntrack state; for example, `conntrack -D -p tcp --orig-port-dst 80`.

Then you should observe that the new {{prodname}} policy is enforced for new packets.

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/failsafe.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/failsafe.mdx
deleted file mode 100644
index e3bd9ec4cd..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/failsafe.mdx
+++ /dev/null
@@ -1,44 +0,0 @@
---
description: Avoid cutting off connectivity to hosts because of incorrect network policies.
---

# Failsafe rules

To avoid completely cutting off a host via incorrect or malformed policy, {{prodname}} has a failsafe mechanism that keeps various pinholes open in the firewall.
By default, {{prodname}} keeps the following ports open on _all_ host endpoints:

| Port | Protocol | Direction          | Purpose                         |
| ---- | -------- | ------------------ | ------------------------------- |
| 22   | TCP      | Inbound            | SSH access                      |
| 53   | UDP      | Outbound           | DNS queries                     |
| 67   | UDP      | Outbound           | DHCP access                     |
| 68   | UDP      | Inbound            | DHCP access                     |
| 179  | TCP      | Inbound & Outbound | BGP access (Calico networking)  |
| 2379 | TCP      | Inbound & Outbound | etcd access                     |
| 2380 | TCP      | Inbound & Outbound | etcd access                     |
| 5473 | TCP      | Inbound & Outbound | Calico Typha access             |
| 6443 | TCP      | Inbound & Outbound | Kubernetes API server access    |
| 6666 | TCP      | Inbound & Outbound | etcd self-hosted service access |
| 6667 | TCP      | Inbound & Outbound | etcd self-hosted service access |

The lists of failsafe ports can be configured via the configuration parameters `FailsafeInboundHostPorts` and `FailsafeOutboundHostPorts` described in [Configuring Felix](../felix/configuration.mdx). They can be disabled by setting each configuration value to "[]".

:::note

Removing the inbound failsafe rules can leave a host inaccessible.

Removing the outbound failsafe rules can leave Felix unable to connect to etcd.

Before disabling the failsafe rules, we recommend creating a policy to replace them with more-specific rules for your environment: see [Creating policy for basic connectivity](connectivity.mdx).

:::

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/forwarded.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/forwarded.mdx
deleted file mode 100644
index f272d01f9c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/forwarded.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
---
description: Learn the subtleties of using the applyOnForward option in host endpoint policies.
---

# Apply on forwarded traffic

If `applyOnForward` is `false`, the host endpoint policy applies to traffic to/from local processes only.

If `applyOnForward` is `true`, the host endpoint policy also applies to forwarded traffic:

- Traffic that comes in via a host endpoint and is forwarded to a local workload (container/pod/VM).
- Traffic from a local workload that is forwarded out via a host endpoint.
- Traffic that comes in via a host endpoint and is forwarded out via another host endpoint.

By default, `applyOnForward` is `false`.

Untracked policies and pre-DNAT policies must have `applyOnForward` set to `true` because they apply to all forwarded traffic.

Forwarded traffic is allowed by default if no policies apply to the endpoint and direction. In other words, if a host endpoint is configured, but there are no policies with `applyOnForward` set to `true` that apply to that host endpoint and traffic direction, forwarded traffic is allowed in that direction. For example, if a forwarded flow is incoming via a host endpoint, but there are no ingress policies with `applyOnForward: true` that apply to that host endpoint, the flow is allowed. If there are `applyOnForward: true` policies that select the host endpoint and direction, but no rules in the policies allow the traffic, the traffic is denied.

This is different from how {{prodname}} treats traffic to or from a local process: if a host endpoint is configured and there are no policies that select the host endpoint in the traffic direction, or no rules that allow the traffic, the traffic is denied.
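As an illustration of these semantics, the following sketch of a policy (name and label are illustrative) opts matching host endpoints into policing forwarded traffic, allowing only forwarded HTTP in the ingress direction:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-forwarded-http   # illustrative name
spec:
  selector: role == 'gateway'  # illustrative host endpoint label
  applyOnForward: true
  types:
    - Ingress
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports: [80]
```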
Traffic that traverses a host endpoint and is forwarded to a workload endpoint must also pass the applicable workload endpoint policy, if any. That is to say, if an `applyOnForward: true` host endpoint policy allows the traffic but workload endpoint policy denies it, the packet is still dropped.

Traffic that ingresses one host endpoint, is forwarded, and egresses another host endpoint must pass ingress policy on the first host endpoint and egress policy on the second host endpoint.

:::note

{{prodname}}'s handling of host endpoint policy changed in Calico v3.0 in two ways:

- It no longer applies to forwarded traffic by default. If you have an existing policy and you want it to apply to forwarded traffic, you need to add `applyOnForward: true` to the policy.
- Even with `applyOnForward: true`, the treatment is not quite the same as in releases before v3.0 because, once a host endpoint is configured, Calico v3.0 allows forwarded traffic through that endpoint by default, whereas previous releases denied forwarded traffic through that endpoint by default. If you want to maintain the default-deny behavior for all host-endpoint forwarded traffic, you can create an empty policy with `applyOnForward` set to `true` that applies to all traffic on all host endpoints.

:::

```bash
calicoctl apply -f - <<EOF
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: empty-default-deny
spec:
  selector: all()
  applyOnForward: true
  types:
    - Ingress
    - Egress
EOF
```

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/objects.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/objects.mdx
deleted file mode 100644
index 91e1e7ad4e..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/objects.mdx
+++ /dev/null
@@ -1,124 +0,0 @@
---
description: To protect a host interface, start by creating a host endpoint object in etcd.
---

# Creating host endpoint objects

For each host endpoint that you want {{prodname}} to secure, you'll need to create a host endpoint object in etcd. Use the `calicoctl create` command to create a host endpoint resource (`HostEndpoint`).

There are two ways to specify the interface that a host endpoint should refer to. You can either specify the name of the interface or its expected IP address. In either case, you'll also need to know the name given to the {{prodname}} node running on the host that owns the interface; in most cases this will be the same as the hostname of the host.

For example, to secure the interface named `eth0` with IP 10.0.0.1 on host `my-host`, run the command below. The name of the endpoint is an arbitrary name required for endpoint identification.

When running this command, replace the placeholders in angle brackets with appropriate values for your deployment.

```bash
calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: HostEndpoint
metadata:
  name: <name>
  labels:
    role: webserver
    environment: production
spec:
  interfaceName: eth0
  node: <node name or hostname>
  profiles: [<profiles>]
  expectedIPs: ["10.0.0.1"]
EOF
```

:::note

Felix tries to detect the correct hostname for a system. It logs out the value it has determined at start-of-day in the following format:

`2015-10-20 17:42:09,813 [INFO][30149/5] calico.felix.config 285: Parameter FelixHostname (Felix compute host hostname) has value 'my-hostname' read from None`

The value (in this case `'my-hostname'`) needs to match the hostname used in etcd. Ideally, the host's system hostname should be set correctly, but if that's not possible, the Felix value can be overridden with the `FelixHostname` configuration setting. See configuration for more details.
:::

Where `<profiles>` is an optional list of security profiles to apply to the endpoint, and labels contains a set of arbitrary key/value pairs that can be used in selector expressions.

:::note

When rendering security rules on other hosts, {{prodname}} uses the `expectedIPs` field to resolve label selectors to IP addresses. If the `expectedIPs` field is omitted, then security rules that use labels will fail to match this endpoint.

:::

Or, if you knew that the IP address should be 10.0.0.1, but not the name of the interface:

```bash
calicoctl create -f - <<EOF
apiVersion: projectcalico.org/v3
kind: HostEndpoint
metadata:
  name: <name>
  labels:
    role: webserver
    environment: production
spec:
  node: <node name or hostname>
  profiles: [<profiles>]
  expectedIPs: ["10.0.0.1"]
EOF
```

After you create host endpoint objects, Felix will start policing traffic to/from that interface. If you have no policy or profiles in place, then you should see traffic being dropped on the interface.

:::note

By default, {{prodname}} has a failsafe in place that allows certain traffic, such as SSH. See below for more details on disabling/configuring the failsafe rules.

:::

If you don't see traffic being dropped, check the hostname, IP address and (if used) the interface name in the configuration. If there was something wrong with the endpoint data, Felix will log a validation error at `WARNING` level and it will ignore the endpoint.

A `grep` through the Felix logs for the string "Validation failed" should allow you to locate the error.

```bash
grep "Validation failed" /var/log/calico/felix.log
```

An example error follows.

```
2016-05-31 12:16:21,651 [WARNING][8657/3] calico.felix.fetcd 1017:
  Validation failed for host endpoint HostEndpointId, treating as
  missing: 'name' or 'expected_ipvX_addrs' must be present.;
  '{ "labels": {"foo": "bar"}, "profile_ids": ["prof1"]}'
```

The error can be quite long, but it should log the precise cause of the rejection; in this case `'name' or 'expected_ipvX_addrs' must be present` tells us that either the interface's name or its expected IP address must be specified.

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/overview.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/overview.mdx
deleted file mode 100644
index ce60071a49..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/overview.mdx
+++ /dev/null
@@ -1,57 +0,0 @@
---
description: Secure host network interfaces.
---

# Host endpoints

This guide describes how to use {{prodname}} to secure the network interfaces of the host itself (as opposed to those of any container/VM workloads that are present on the host). We call such interfaces "host endpoints", to distinguish them from "workload endpoints" (such as containers or VMs).

{{prodname}} supports the same rich security policy model for host endpoints (host endpoint policy) that it supports for workload endpoints. Host endpoints can have labels, and their labels are in the same "namespace" as those of workload endpoints. This allows security rules for either type of endpoint to refer to the other type (or a mix of the two) using labels and selectors.

{{prodname}} does not support setting IPs or policing MAC addresses for host interfaces; it assumes that the interfaces are configured by the underlying network fabric.

{{prodname}} distinguishes workload endpoints from host endpoints by a configurable prefix.
Unless you happen to have host interfaces whose name matches the default for that prefix (`cali`), you won't need to change it. In case you do, see the `InterfacePrefix` configuration value in [Configuring Felix](../felix/configuration.mdx). Interfaces that start with a value listed in `InterfacePrefix` are assumed to be workload interfaces. Others are treated as host interfaces.

{{prodname}} blocks all traffic to/from workload interfaces by default, allowing traffic only if the interface is known and policy is in place. However, for host endpoints, {{prodname}} is more lenient; it only polices traffic to/from interfaces that it's been explicitly told about. Traffic to/from other interfaces is left alone.

You can use host endpoint policy to secure a NAT gateway or router. {{prodname}} supports selector-based policy when running on a gateway or router, allowing for rich, dynamic security policy based on the labels attached to your host endpoints.

You can apply host endpoint policies to three types of traffic:

- Traffic that is terminated locally.
- Traffic that is forwarded between host endpoints.
- Traffic that is forwarded between a host endpoint and a workload endpoint on the same host.

Set the `applyOnForward` flag to `true` to apply a policy to forwarded traffic. See [GlobalNetworkPolicy spec](../resources/globalnetworkpolicy.mdx#spec).

:::note

Both traffic forwarded between host endpoints and traffic forwarded between a host endpoint and a workload endpoint on the same host are regarded as `forwarded traffic`.

![](/img/calico/bare-metal-packet-flows.svg)

:::

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/pre-dnat.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/pre-dnat.mdx
deleted file mode 100644
index f4cfc95087..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/pre-dnat.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
---
description: Apply rules in a host endpoint policy before any DNAT.
---

# Pre-DNAT policy

Policy for host endpoints can be marked as `preDNAT`. This means that rules in that policy are applied before any DNAT (Destination Network Address Translation), which is useful if it is more convenient to specify {{prodname}} policy in terms of a packet's original destination IP address and port, rather than in terms of that packet's destination IP address and port after it has been DNAT'd.

An example is securing access to Kubernetes NodePorts from outside the cluster. Traffic from outside is addressed to any node's IP address, on a known NodePort, and Kubernetes (kube-proxy) then DNATs that to the IP address of one of the pods that provides the corresponding service, and the relevant port number on that pod (which is usually different from the NodePort).

As NodePorts are the externally advertised way of connecting to services (and a NodePort uniquely identifies a service, whereas an internal port number may not), it makes sense to express {{prodname}} policy that exposes or secures particular services in terms of the corresponding NodePorts. But that is only possible if the {{prodname}} policy is applied before DNAT changes the NodePort to something else. Hence this kind of policy needs `preDNAT` set to `true`.
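As a sketch of what that looks like in practice (the label and port range are illustrative), a pre-DNAT policy that admits external traffic to the default NodePort range on labeled host endpoints could be written as:

```yaml
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: allow-nodeports   # illustrative name
spec:
  preDNAT: true
  applyOnForward: true    # required for pre-DNAT policies
  order: 10
  selector: has(kubernetes-host)  # illustrative host endpoint label
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports: ["30000:32767"]
```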
In addition to being applied before any DNAT, the enforcement of pre-DNAT policy differs from that of normal host endpoint policy in three key details, reflecting that it is designed for policing incoming traffic from outside the cluster:

- Pre-DNAT policy may only have ingress rules, not egress. (When incoming traffic is allowed by the ingress rules, standard connection tracking is sufficient to allow the return path traffic.)

- Pre-DNAT policy is enforced for all traffic arriving through a host endpoint, regardless of where that traffic is going, and in particular, even if that traffic is routed to a local workload on the same host. (Whereas normal host endpoint policy is skipped for traffic going to a local workload.)

- There is no 'default drop' semantic for pre-DNAT policy (as there is for normal host endpoint policy). In other words, if a host endpoint is defined but has no pre-DNAT policies that explicitly allow or deny a particular incoming packet, that packet is allowed to continue on its way, and will then be accepted or dropped according to workload policy (if it is going to a local workload) or to normal host endpoint policy (if not).

diff --git a/calico_versioned_docs/version-3.25/reference/host-endpoints/selector.mdx b/calico_versioned_docs/version-3.25/reference/host-endpoints/selector.mdx
deleted file mode 100644
index 9fc2d1bc4d..0000000000
--- a/calico_versioned_docs/version-3.25/reference/host-endpoints/selector.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
---
description: Apply ordered policies to endpoints that match specific label selectors.
---

# Selector-based policies

We recommend using selector-based security policy with host endpoints. This allows ordered policy to be applied to endpoints that match particular label selectors.

For example, you could add a second policy for webserver access:

```bash
cat <<EOF | calicoctl create -f -
apiVersion: projectcalico.org/v3
kind: GlobalNetworkPolicy
metadata:
  name: webserver
spec:
  selector: role == 'webserver'
  order: 100
  ingress:
    - action: Allow
      protocol: TCP
      destination:
        ports: [80]
  egress:
    - action: Allow
EOF
```

diff --git a/calico_versioned_docs/version-3.25/reference/installation/_README.mdx b/calico_versioned_docs/version-3.25/reference/installation/_README.mdx
deleted file mode 100644
index a35570764b..0000000000
--- a/calico_versioned_docs/version-3.25/reference/installation/_README.mdx
+++ /dev/null
@@ -1,7 +0,0 @@
# Generating API reference docs

The api.html doc in this directory is generated using https://github.com/tmjd/gen-crd-api-reference-docs/tree/kb_v2.

To generate an updated file, change to the root of the docs repository and run the appropriate Makefile target. See the `README.md` file for more details on how to list available targets and which ones to run.

diff --git a/calico_versioned_docs/version-3.25/reference/installation/_api.mdx b/calico_versioned_docs/version-3.25/reference/installation/_api.mdx
deleted file mode 100644
index 9442b811eb..0000000000
--- a/calico_versioned_docs/version-3.25/reference/installation/_api.mdx
+++ /dev/null
@@ -1,7281 +0,0 @@
Packages:

- operator.tigera.io/v1

# operator.tigera.io/v1

API Schema definitions for configuring the installation of Calico and Calico Enterprise.

Resource Types: APIServer, ApplicationLayer, EgressGateway, ImageSet, Installation

## APIServer

APIServer installs the Tigera API server and related resources. At most one instance of this resource is supported. It must be named "default" or "tigera-secure".

| Field | Description |
| ----- | ----------- |
| `apiVersion` (string) | `operator.tigera.io/v1` |
| `kind` (string) | `APIServer` |
| `metadata` (Kubernetes meta/v1.ObjectMeta) | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
| `spec` (APIServerSpec) | Specification of the desired state for the Tigera API server. |
| `spec.apiServerDeployment` (APIServerDeployment) | APIServerDeployment configures the calico-apiserver (or tigera-apiserver in Enterprise) Deployment. If used in conjunction with ControlPlaneNodeSelector or ControlPlaneTolerations, then these overrides take precedence. |
| `status` (APIServerStatus) | Most recently observed status for the Tigera API server. |

## ApplicationLayer

ApplicationLayer is the Schema for the applicationlayers API.

| Field | Description |
| ----- | ----------- |
| `apiVersion` (string) | `operator.tigera.io/v1` |
| `kind` (string) | `ApplicationLayer` |
| `metadata` (Kubernetes meta/v1.ObjectMeta) | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
| `spec.webApplicationFirewall` (WAFStatusType) | WebApplicationFirewall controls whether or not ModSecurity enforcement is enabled for the cluster. When enabled, Services may opt in to having ingress traffic examined by ModSecurity. |
| `spec.logCollection` (LogCollectionSpec) | Specification for application layer (L7) log collection. |
| `spec.applicationLayerPolicy` (ApplicationLayerPolicyStatusType) | Application Layer Policy controls whether or not ALP enforcement is enabled for the cluster. When enabled, NetworkPolicies with HTTP match rules may be defined to opt workloads in to traffic enforcement on the application layer. |
| `status` (ApplicationLayerStatus) | Most recently observed status. |

## EgressGateway

EgressGateway is the Schema for the egressgateways API.

| Field | Description |
| ----- | ----------- |
| `apiVersion` (string) | `operator.tigera.io/v1` |
| `kind` (string) | `EgressGateway` |
| `metadata` (Kubernetes meta/v1.ObjectMeta) | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
| `spec.replicas` (int32, optional) | Replicas defines how many instances of the Egress Gateway pod will run. |
| `spec.ipPools` ([]EgressGatewayIPPool) | IPPools defines the IP Pools that the Egress Gateway pods should be using. Either name or CIDR must be specified. IPPools must match existing IPPools. |
| `spec.externalNetworks` ([]string, optional) | ExternalNetworks defines the external network names this Egress Gateway is associated with. ExternalNetworks must match existing external networks. |
| `spec.logSeverity` (LogLevel, optional) | LogSeverity defines the logging level of the Egress Gateway. Default: Info |
| `spec.template` (EgressGatewayDeploymentPodTemplateSpec, optional) | Template describes the EGW Deployment pod that will be created. |
| `spec.egressGatewayFailureDetection` (EgressGatewayFailureDetection, optional) | EgressGatewayFailureDetection is used to configure how the Egress Gateway determines readiness. If both ICMP and HTTP probes are defined, one ICMP probe and one HTTP probe should succeed for Egress Gateways to become ready. Otherwise, whichever of the ICMP or HTTP probes is configured should succeed for Egress Gateways to become ready. |
| `spec.aws` (AWSEgressGateway, optional) | AWS defines the additional configuration options for Egress Gateways on AWS. |
| `status` (EgressGatewayStatus) | Most recently observed status. |

## ImageSet

ImageSet is used to specify image digests for the images that the operator deploys. The name of the ImageSet is expected to be in the format `<variant>-<release>`. The variant used is `enterprise` if the InstallationSpec Variant is `TigeraSecureEnterprise`, otherwise it is `calico`. The release must match the version of the variant that the operator is built to deploy; this version can be obtained by passing the `--version` flag to the operator binary.

| Field | Description |
| ----- | ----------- |
| `apiVersion` (string) | `operator.tigera.io/v1` |
| `kind` (string) | `ImageSet` |
| `metadata` (Kubernetes meta/v1.ObjectMeta) | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
| `spec.images` ([]Image) | Images is the list of images to use digests for. All images that the operator will deploy must be specified. |

## Installation

Installation configures an installation of Calico or Calico Enterprise. At most one instance of this resource is supported. It must be named "default". The Installation API installs core networking and network policy components, and provides general install-time configuration.

| Field | Description |
| ----- | ----------- |
| `apiVersion` (string) | `operator.tigera.io/v1` |
| `kind` (string) | `Installation` |
| `metadata` (Kubernetes meta/v1.ObjectMeta) | Refer to the Kubernetes API documentation for the fields of the `metadata` field. |
| `spec` (InstallationSpec) | Specification of the desired state for the Calico or Calico Enterprise installation. |
| `spec.variant` (ProductVariant, optional) | Variant is the product to install: one of Calico or TigeraSecureEnterprise. Default: Calico |
| `spec.registry` (string, optional) | Registry is the default Docker registry used for component Docker images. If specified, the given value must end with a slash character (`/`) and all images will be pulled from this registry. If not specified, the default registries will be used. A special case value, UseDefault, is supported to explicitly specify that the default registries will be used. Image format: `<registry><imagePath>/<imagePrefix><imageName>:<image-tag>`. This option allows configuring the `<registry>` portion of that format. |
| `spec.imagePath` (string, optional) | ImagePath allows for the path part of an image to be specified. If specified, the value will be used as the image path for each image. If not specified or empty, the default for each image will be used. A special case value, UseDefault, is supported to explicitly specify that the default image path will be used for each image. This option allows configuring the `<imagePath>` portion of the image format. |
| `spec.imagePrefix` (string, optional) | ImagePrefix allows for the prefix part of an image to be specified. If specified, the given value will be used as a prefix on each image. If not specified or empty, no prefix will be used. A special case value, UseDefault, is supported to explicitly specify that the default image prefix will be used for each image. This option allows configuring the `<imagePrefix>` portion of the image format. |
| `spec.imagePullSecrets` ([]Kubernetes core/v1.LocalObjectReference, optional) | ImagePullSecrets is an array of references to container registry pull secrets to use. These are applied to all images to be pulled. |
| `spec.kubernetesProvider` (Provider, optional) | KubernetesProvider specifies a particular provider of the Kubernetes platform and enables provider-specific configuration. If the specified value is empty, the Operator will attempt to automatically determine the current provider. If the specified value is not empty, the Operator will still attempt auto-detection, but will additionally compare the auto-detected value to the specified value to confirm they match. |
| `spec.cni` (CNISpec, optional) | CNI specifies the CNI that will be used by this installation. |
| `spec.calicoNetwork` (CalicoNetworkSpec, optional) | CalicoNetwork specifies networking configuration options for Calico. |
| `spec.typhaAffinity` (TyphaAffinity, optional) | Deprecated. Please use Installation.Spec.TyphaDeployment instead. TyphaAffinity allows configuration of node affinity characteristics for Typha pods. |
| `spec.controlPlaneNodeSelector` (map[string]string, optional) | ControlPlaneNodeSelector is used to select control plane nodes on which to run Calico components. This is globally applied to all resources created by the operator excluding daemonsets. |
| `spec.controlPlaneTolerations` ([]Kubernetes core/v1.Toleration, optional) | ControlPlaneTolerations specify tolerations which are then globally applied to all resources created by the operator. |
| `spec.controlPlaneReplicas` (int32, optional) | ControlPlaneReplicas defines how many replicas of the control plane core components will be deployed. This field applies to all control plane components that support High Availability. Defaults to 2. |
| `spec.nodeMetricsPort` (int32, optional) | NodeMetricsPort specifies which port calico/node serves prometheus metrics on. By default, metrics are not enabled. If specified, this overrides any FelixConfiguration resources which may exist. If omitted, then prometheus metrics may still be configured through FelixConfiguration. |
| `spec.typhaMetricsPort` (int32, optional) | TyphaMetricsPort specifies which port calico/typha serves prometheus metrics on. By default, metrics are not enabled. |
| `spec.flexVolumePath` (string, optional) | FlexVolumePath optionally specifies a custom path for FlexVolume. If not specified, FlexVolume will be enabled by default. If set to 'None', FlexVolume will be disabled. The default is based on the kubernetesProvider. |
| `spec.kubeletVolumePluginPath` (string, optional) | KubeletVolumePluginPath optionally specifies enablement of the Calico CSI plugin. If not specified, CSI will be enabled by default. If set to 'None', CSI will be disabled. Default: /var/lib/kubelet |
| `spec.nodeUpdateStrategy` (Kubernetes apps/v1.DaemonSetUpdateStrategy, optional) | NodeUpdateStrategy can be used to customize the desired update strategy, such as the MaxUnavailable field. |
| `spec.componentResources` ([]ComponentResource, optional) | Deprecated. Please use CalicoNodeDaemonSet, TyphaDeployment, and KubeControllersDeployment. ComponentResources can be used to customize the resource requirements for each component. Node, Typha, and KubeControllers are supported for installations. |
| `spec.certificateManagement` (CertificateManagement, optional) | CertificateManagement configures pods to submit a CertificateSigningRequest to the certificates.k8s.io/v1beta1 API in order to obtain TLS certificates. This feature requires that you bring your own CSR signing and approval process, otherwise pods will be stuck during initialization. |
| `spec.nonPrivileged` (NonPrivilegedType, optional) | NonPrivileged configures Calico to be run in non-privileged containers as non-root users where possible. |
| `spec.calicoNodeDaemonSet` (CalicoNodeDaemonSet) | CalicoNodeDaemonSet configures the calico-node DaemonSet. If used in conjunction with the deprecated ComponentResources, then these overrides take precedence. |

    - -
    - -calicoKubeControllersDeployment
    - - -CalicoKubeControllersDeployment - - - -
    - -

    -CalicoKubeControllersDeployment configures the calico-kube-controllers Deployment. If used in -conjunction with the deprecated ComponentResources, then these overrides take precedence. -

    - -
    - -typhaDeployment
    - - -TyphaDeployment - - - -
    - -

    -TyphaDeployment configures the typha Deployment. If used in conjunction with the deprecated -ComponentResources or TyphaAffinity, then these overrides take precedence. -

    - -
    - -calicoWindowsUpgradeDaemonSet
    - - -CalicoWindowsUpgradeDaemonSet - - - -
    - -

    -CalicoWindowsUpgradeDaemonSet configures the calico-windows-upgrade DaemonSet. -

    - -
    - -fipsMode
    - - -FIPSMode - - - -
    - -(Optional) -

    -FIPSMode uses images and features only that are using FIPS 140-2 validated cryptographic modules and standards. -Default: Disabled -

    - -
    -
    - -status
    - - -InstallationStatus - - - -
    - -

    -Most recently observed state for the Calico or Calico Enterprise installation. -

    - -
    -
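To show how these fields fit together, here is a minimal sketch of an Installation manifest, assuming a private registry; the registry URL and pull secret name are placeholders, and the values shown are illustrative rather than defaults:

```yaml
# Sketch of an Installation resource. The operator watches a single
# Installation resource, conventionally named "default".
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  variant: Calico
  # Must end with a slash; all component images are pulled from here.
  registry: registry.example.com/
  imagePullSecrets:
    - name: my-registry-secret   # hypothetical pull secret
  controlPlaneReplicas: 2        # the documented default
  nodeMetricsPort: 9091          # enables calico/node Prometheus metrics
```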

Monitor

Monitor is the Schema for the monitor API. At most one instance of this resource is supported. It must be named "tigera-secure".

- apiVersion (string): operator.tigera.io/v1
- kind (string): Monitor
- metadata (Kubernetes meta/v1.ObjectMeta): Refer to the Kubernetes API documentation for the fields of the metadata field.
- spec (MonitorSpec)
- status (MonitorStatus)
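Because the Monitor resource is a singleton with a fixed name, a conforming manifest is short; this sketch leaves the spec empty:

```yaml
apiVersion: operator.tigera.io/v1
kind: Monitor
metadata:
  name: tigera-secure   # the only supported name
spec: {}
```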

TigeraStatus

TigeraStatus represents the most recently observed status for Calico or a Calico Enterprise functional area.

- apiVersion (string): operator.tigera.io/v1
- kind (string): TigeraStatus
- metadata (Kubernetes meta/v1.ObjectMeta): Refer to the Kubernetes API documentation for the fields of the metadata field.
- spec (TigeraStatusSpec)
- status (TigeraStatusStatus)

APIServerDeployment

(Appears on: APIServerSpec)

APIServerDeployment is the configuration for the API server Deployment.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the Deployment.
- spec (APIServerDeploymentSpec, optional): Spec is the specification of the API server Deployment.

APIServerDeploymentContainer

(Appears on: APIServerDeploymentPodSpec)

APIServerDeploymentContainer is an API server Deployment container.

- name (string): Name is an enum which identifies the API server Deployment container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named API server Deployment container's resources. If omitted, the API server Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, this value takes precedence.

APIServerDeploymentInitContainer

(Appears on: APIServerDeploymentPodSpec)

APIServerDeploymentInitContainer is an API server Deployment init container.

- name (string): Name is an enum which identifies the API server Deployment init container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named API server Deployment init container's resources. If omitted, the API server Deployment will use its default value for this init container's resources.

APIServerDeploymentPodSpec

(Appears on: APIServerDeploymentPodTemplateSpec)

APIServerDeploymentPodSpec is the API server Deployment's PodSpec.

- initContainers ([]APIServerDeploymentInitContainer, optional): InitContainers is a list of API server init containers. If specified, this overrides the specified API server Deployment init containers. If omitted, the API server Deployment will use its default values for its init containers.
- containers ([]APIServerDeploymentContainer, optional): Containers is a list of API server containers. If specified, this overrides the specified API server Deployment containers. If omitted, the API server Deployment will use its default values for its containers.
- affinity (Kubernetes core/v1.Affinity, optional): Affinity is a group of affinity scheduling rules for the API server pods. If specified, this overrides any affinity that may be set on the API server Deployment. If omitted, the API server Deployment will use its default value for affinity. WARNING: this field overrides the default API server Deployment affinity.
- nodeSelector (map[string]string): NodeSelector is the API server pod's scheduling constraints. If specified, each of the key/value pairs is added to the API server Deployment nodeSelector, provided the key does not already exist in the object's nodeSelector. If used in conjunction with ControlPlaneNodeSelector, that nodeSelector is set on the API server Deployment and each of this field's key/value pairs is added to the API server Deployment nodeSelector, provided the key does not already exist in the object's nodeSelector. If omitted, the API server Deployment will use its default value for nodeSelector. WARNING: this field modifies the default API server Deployment nodeSelector.
- topologySpreadConstraints ([]Kubernetes core/v1.TopologySpreadConstraint, optional): TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. The scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.
- tolerations ([]Kubernetes core/v1.Toleration, optional): Tolerations is the API server pod's tolerations. If specified, this overrides any tolerations that may be set on the API server Deployment. If omitted, the API server Deployment will use its default value for tolerations. WARNING: this field overrides the default API server Deployment tolerations.

APIServerDeploymentPodTemplateSpec

(Appears on: APIServerDeploymentSpec)

APIServerDeploymentPodTemplateSpec is the API server Deployment's PodTemplateSpec.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the pod's metadata.
- spec (APIServerDeploymentPodSpec, optional): Spec is the API server Deployment's PodSpec.

APIServerDeploymentSpec

(Appears on: APIServerDeployment)

APIServerDeploymentSpec defines configuration for the API server Deployment.

- minReadySeconds (int32, optional): MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready, without any of its containers crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the API server Deployment. If omitted, the API server Deployment will use its default value for minReadySeconds.
- template (APIServerDeploymentPodTemplateSpec, optional): Template describes the API server Deployment pod that will be created.

APIServerSpec

(Appears on: APIServer)

APIServerSpec defines the desired state of the Tigera API server.

- apiServerDeployment (APIServerDeployment): APIServerDeployment configures the calico-apiserver (or tigera-apiserver in Enterprise) Deployment. If used in conjunction with ControlPlaneNodeSelector or ControlPlaneTolerations, these overrides take precedence.
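The deployment override types above nest inside an APIServer resource as in this sketch; the container name "calico-apiserver" and the resource values are assumptions for illustration, since the valid container-name enum values are defined by the operator:

```yaml
apiVersion: operator.tigera.io/v1
kind: APIServer
metadata:
  name: default   # conventionally a single resource named "default"
spec:
  apiServerDeployment:
    spec:
      minReadySeconds: 10
      template:
        spec:
          containers:
            - name: calico-apiserver   # assumed enum value
              resources:
                requests:
                  cpu: 100m
                  memory: 128Mi
          nodeSelector:
            kubernetes.io/os: linux    # merged into the default nodeSelector
```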

APIServerStatus

(Appears on: APIServer)

APIServerStatus defines the observed state of the Tigera API server.

- state (string): State provides user-readable status.
- conditions ([]Kubernetes meta/v1.Condition, optional): Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded, or other custom types.

AWSEgressGateway

(Appears on: EgressGatewaySpec)

AWSEgressGateway defines the configuration for deploying EgressGateway in AWS.

- nativeIP (NativeIP, optional): NativeIP defines whether EgressGateway is to use an AWS-backed IPPool. Default: Disabled.
- elasticIPs ([]string, optional): ElasticIPs defines the set of elastic IPs that can be used for Egress Gateway pods. NativeIP must be Enabled if elastic IPs are set.

AnomalyDetectionSpec

- storageClassName (string, optional): StorageClassName is used to query for an existing StorageClass with the same name as the field value. It also populates the PersistentVolumeClaim.StorageClassName that is used to provision disks for the Anomaly Detection API pod for model storage. If the field is left blank, the Anomaly Detection API will use an EmptyDir VolumeSource. The StorageClassName should only be modified when no StorageClass is currently active. We recommend choosing a storage class dedicated to Anomaly Detection only; otherwise, model retention cannot be guaranteed during upgrades. See https://docs.tigera.io/maintenance/upgrading for up-to-date instructions. This field is not used for managed clusters in a multi-cluster management setup.

ApplicationLayerPolicyStatusType (string alias)

(Appears on: ApplicationLayerSpec)

ApplicationLayerSpec

(Appears on: ApplicationLayer)

ApplicationLayerSpec defines the desired state of ApplicationLayer.

- webApplicationFirewall (WAFStatusType): WebApplicationFirewall controls whether or not ModSecurity enforcement is enabled for the cluster. When enabled, Services may opt in to having ingress traffic examined by ModSecurity.
- logCollection (LogCollectionSpec): Specification for application layer (L7) log collection.
- applicationLayerPolicy (ApplicationLayerPolicyStatusType): ApplicationLayerPolicy controls whether or not ALP enforcement is enabled for the cluster. When enabled, NetworkPolicies with HTTP match rules may be defined to opt workloads in to traffic enforcement on the application layer.
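As a sketch, an ApplicationLayer resource enabling both features might look like the following; the resource name and the "Enabled" enum values are assumptions, since the WAFStatusType and ApplicationLayerPolicyStatusType values are not listed here:

```yaml
apiVersion: operator.tigera.io/v1
kind: ApplicationLayer
metadata:
  name: tigera-secure   # assumed singleton name
spec:
  webApplicationFirewall: Enabled   # assumed enum value
  applicationLayerPolicy: Enabled   # assumed enum value
```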

ApplicationLayerStatus

(Appears on: ApplicationLayer)

ApplicationLayerStatus defines the observed state of ApplicationLayer.

- state (string): State provides user-readable status.
- conditions ([]Kubernetes meta/v1.Condition, optional): Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded, or other custom types.

BGPOption (string alias)

(Appears on: CalicoNetworkSpec)

BGPOption describes the mode of BGP to use. One of: Enabled, Disabled.

CAType (string alias)

CAType specifies which verification method the tunnel client should use to verify the tunnel server's identity. One of: Tigera, Public.

CNIPluginType (string alias)

(Appears on: CNISpec)

CNIPluginType describes the type of CNI plugin used. One of: Calico, GKE, AmazonVPC, AzureVNET.

CNISpec

(Appears on: InstallationSpec)

CNISpec contains configuration for the CNI plugin.

- type (CNIPluginType): Specifies the CNI plugin that will be used in the Calico or Calico Enterprise installation.
  - For KubernetesProvider GKE, this field defaults to GKE.
  - For KubernetesProvider AKS, this field defaults to AzureVNET.
  - For KubernetesProvider EKS, this field defaults to AmazonVPC.
  - If the aws-node daemonset exists in kube-system when the Installation resource is created, this field defaults to AmazonVPC.
  - For all other cases this field defaults to Calico.

  For the value Calico, the CNI plugin binaries and CNI config will be installed as part of deployment; for all other values, the CNI plugin binaries and CNI config are a dependency that is expected to be installed separately. Default: Calico.
- ipam (IPAMSpec, optional): IPAM specifies the pod IP address management that will be used in the Calico or Calico Enterprise installation.
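A minimal sketch of the cni portion of an Installation spec, selecting the Calico CNI plugin with Calico IPAM (the documented defaults for platforms other than GKE, AKS, and EKS):

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  cni:
    type: Calico    # CNI binaries and config installed by the operator
    ipam:
      type: Calico  # Calico IPAM is only installed when cni.type is Calico
```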

CalicoKubeControllersDeployment

(Appears on: InstallationSpec)

CalicoKubeControllersDeployment is the configuration for the calico-kube-controllers Deployment.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the Deployment.
- spec (CalicoKubeControllersDeploymentSpec, optional): Spec is the specification of the calico-kube-controllers Deployment.

CalicoKubeControllersDeploymentContainer

(Appears on: CalicoKubeControllersDeploymentPodSpec)

CalicoKubeControllersDeploymentContainer is a calico-kube-controllers Deployment container.

- name (string): Name is an enum which identifies the calico-kube-controllers Deployment container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-kube-controllers Deployment container's resources. If omitted, the calico-kube-controllers Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, this value takes precedence.

CalicoKubeControllersDeploymentPodSpec

(Appears on: CalicoKubeControllersDeploymentPodTemplateSpec)

CalicoKubeControllersDeploymentPodSpec is the calico-kube-controllers Deployment's PodSpec.

- containers ([]CalicoKubeControllersDeploymentContainer, optional): Containers is a list of calico-kube-controllers containers. If specified, this overrides the specified calico-kube-controllers Deployment containers. If omitted, the calico-kube-controllers Deployment will use its default values for its containers.
- affinity (Kubernetes core/v1.Affinity, optional): Affinity is a group of affinity scheduling rules for the calico-kube-controllers pods. If specified, this overrides any affinity that may be set on the calico-kube-controllers Deployment. If omitted, the calico-kube-controllers Deployment will use its default value for affinity. WARNING: this field overrides the default calico-kube-controllers Deployment affinity.
- nodeSelector (map[string]string): NodeSelector is the calico-kube-controllers pod's scheduling constraints. If specified, each of the key/value pairs is added to the calico-kube-controllers Deployment nodeSelector, provided the key does not already exist in the object's nodeSelector. If used in conjunction with ControlPlaneNodeSelector, that nodeSelector is set on the calico-kube-controllers Deployment and each of this field's key/value pairs is added to the calico-kube-controllers Deployment nodeSelector, provided the key does not already exist in the object's nodeSelector. If omitted, the calico-kube-controllers Deployment will use its default value for nodeSelector. WARNING: this field modifies the default calico-kube-controllers Deployment nodeSelector.
- tolerations ([]Kubernetes core/v1.Toleration, optional): Tolerations is the calico-kube-controllers pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-kube-controllers Deployment. If omitted, the calico-kube-controllers Deployment will use its default value for tolerations. WARNING: this field overrides the default calico-kube-controllers Deployment tolerations.

CalicoKubeControllersDeploymentPodTemplateSpec

(Appears on: CalicoKubeControllersDeploymentSpec)

CalicoKubeControllersDeploymentPodTemplateSpec is the calico-kube-controllers Deployment's PodTemplateSpec.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the pod's metadata.
- spec (CalicoKubeControllersDeploymentPodSpec, optional): Spec is the calico-kube-controllers Deployment's PodSpec.

CalicoKubeControllersDeploymentSpec

(Appears on: CalicoKubeControllersDeployment)

CalicoKubeControllersDeploymentSpec defines configuration for the calico-kube-controllers Deployment.

- minReadySeconds (int32, optional): MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready, without any of its containers crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-kube-controllers Deployment. If omitted, the calico-kube-controllers Deployment will use its default value for minReadySeconds.
- template (CalicoKubeControllersDeploymentPodTemplateSpec, optional): Template describes the calico-kube-controllers Deployment pod that will be created.
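A sketch of how these types compose inside an Installation spec; the container name "calico-kube-controllers" is an assumption, since the valid enum values are defined by the operator:

```yaml
spec:
  calicoKubeControllersDeployment:
    spec:
      minReadySeconds: 10
      template:
        spec:
          containers:
            - name: calico-kube-controllers   # assumed enum value
              resources:
                limits:
                  memory: 256Mi
```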

CalicoNetworkSpec

(Appears on: InstallationSpec)

CalicoNetworkSpec specifies configuration options for Calico-provided pod networking.

- linuxDataplane (LinuxDataplaneOption, optional): LinuxDataplane is used to select the dataplane used for Linux nodes. In particular, it causes the operator to add required mounts and environment variables for the particular dataplane. If not specified, iptables mode is used. Default: Iptables.
- bgp (BGPOption, optional): BGP configures whether or not to enable Calico's BGP capabilities.
- ipPools ([]IPPool, optional): IPPools contains a list of IP pools to create if none exist. At most one IP pool of each address family may be specified. If omitted, a single pool will be configured if needed.
- mtu (int32, optional): MTU specifies the maximum transmission unit to use on the pod network. If not specified, Calico will perform MTU auto-detection based on the cluster network.
- nodeAddressAutodetectionV4 (NodeAddressAutodetection, optional): NodeAddressAutodetectionV4 specifies an approach to automatically detect node IPv4 addresses. If not specified, default auto-detection settings will be used to acquire an IPv4 address for each node.
- nodeAddressAutodetectionV6 (NodeAddressAutodetection, optional): NodeAddressAutodetectionV6 specifies an approach to automatically detect node IPv6 addresses. If not specified, IPv6 addresses will not be auto-detected.
- hostPorts (HostPortsType, optional): HostPorts configures whether or not Calico will support Kubernetes HostPorts. Valid only when using the Calico CNI plugin. Default: Enabled.
- multiInterfaceMode (MultiInterfaceMode, optional): MultiInterfaceMode configures what will configure multiple interfaces per pod. Only valid for Calico Enterprise installations using the Calico CNI plugin. Default: None.
- containerIPForwarding (ContainerIPForwardingType, optional): ContainerIPForwarding configures whether IP forwarding will be enabled for containers in the CNI configuration. Default: Disabled.
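A sketch of a calicoNetwork block inside an Installation spec, using only values documented above; the pool CIDR and MTU are illustrative:

```yaml
spec:
  calicoNetwork:
    linuxDataplane: Iptables     # the documented default
    bgp: Enabled
    mtu: 1440                    # illustrative; omit for auto-detection
    hostPorts: Enabled
    ipPools:
      - cidr: 192.168.0.0/16     # illustrative pool CIDR
        encapsulation: IPIPCrossSubnet
        natOutgoing: Enabled
```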

CalicoNodeDaemonSet

(Appears on: InstallationSpec)

CalicoNodeDaemonSet is the configuration for the calico-node DaemonSet.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the DaemonSet.
- spec (CalicoNodeDaemonSetSpec, optional): Spec is the specification of the calico-node DaemonSet.

CalicoNodeDaemonSetContainer

(Appears on: CalicoNodeDaemonSetPodSpec)

CalicoNodeDaemonSetContainer is a calico-node DaemonSet container.

- name (string): Name is an enum which identifies the calico-node DaemonSet container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-node DaemonSet container's resources. If omitted, the calico-node DaemonSet will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, this value takes precedence.

CalicoNodeDaemonSetInitContainer

(Appears on: CalicoNodeDaemonSetPodSpec)

CalicoNodeDaemonSetInitContainer is a calico-node DaemonSet init container.

- name (string): Name is an enum which identifies the calico-node DaemonSet init container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-node DaemonSet init container's resources. If omitted, the calico-node DaemonSet will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, this value takes precedence.

CalicoNodeDaemonSetPodSpec

(Appears on: CalicoNodeDaemonSetPodTemplateSpec)

CalicoNodeDaemonSetPodSpec is the calico-node DaemonSet's PodSpec.

- initContainers ([]CalicoNodeDaemonSetInitContainer, optional): InitContainers is a list of calico-node init containers. If specified, this overrides the specified calico-node DaemonSet init containers. If omitted, the calico-node DaemonSet will use its default values for its init containers.
- containers ([]CalicoNodeDaemonSetContainer, optional): Containers is a list of calico-node containers. If specified, this overrides the specified calico-node DaemonSet containers. If omitted, the calico-node DaemonSet will use its default values for its containers.
- affinity (Kubernetes core/v1.Affinity, optional): Affinity is a group of affinity scheduling rules for the calico-node pods. If specified, this overrides any affinity that may be set on the calico-node DaemonSet. If omitted, the calico-node DaemonSet will use its default value for affinity. WARNING: this field overrides the default calico-node DaemonSet affinity.
- nodeSelector (map[string]string, optional): NodeSelector is the calico-node pod's scheduling constraints. If specified, each of the key/value pairs is added to the calico-node DaemonSet nodeSelector, provided the key does not already exist in the object's nodeSelector. If omitted, the calico-node DaemonSet will use its default value for nodeSelector. WARNING: this field modifies the default calico-node DaemonSet nodeSelector.
- tolerations ([]Kubernetes core/v1.Toleration, optional): Tolerations is the calico-node pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-node DaemonSet. If omitted, the calico-node DaemonSet will use its default value for tolerations. WARNING: this field overrides the default calico-node DaemonSet tolerations.

CalicoNodeDaemonSetPodTemplateSpec

(Appears on: CalicoNodeDaemonSetSpec)

CalicoNodeDaemonSetPodTemplateSpec is the calico-node DaemonSet's PodTemplateSpec.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the pod's metadata.
- spec (CalicoNodeDaemonSetPodSpec, optional): Spec is the calico-node DaemonSet's PodSpec.

CalicoNodeDaemonSetSpec

(Appears on: CalicoNodeDaemonSet)

CalicoNodeDaemonSetSpec defines configuration for the calico-node DaemonSet.

- minReadySeconds (int32, optional): MinReadySeconds is the minimum number of seconds for which a newly created DaemonSet pod should be ready, without any of its containers crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-node DaemonSet. If omitted, the calico-node DaemonSet will use its default value for minReadySeconds.
- template (CalicoNodeDaemonSetPodTemplateSpec, optional): Template describes the calico-node DaemonSet pod that will be created.
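A sketch of a calicoNodeDaemonSet override inside an Installation spec; the container name "calico-node" is an assumption for illustration:

```yaml
spec:
  calicoNodeDaemonSet:
    spec:
      minReadySeconds: 5
      template:
        spec:
          containers:
            - name: calico-node   # assumed enum value
              resources:
                requests:
                  cpu: 250m
```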

CalicoWindowsUpgradeDaemonSet

(Appears on: InstallationSpec)

CalicoWindowsUpgradeDaemonSet is the configuration for the calico-windows-upgrade DaemonSet.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the DaemonSet.
- spec (CalicoWindowsUpgradeDaemonSetSpec, optional): Spec is the specification of the calico-windows-upgrade DaemonSet.

CalicoWindowsUpgradeDaemonSetContainer

(Appears on: CalicoWindowsUpgradeDaemonSetPodSpec)

CalicoWindowsUpgradeDaemonSetContainer is a calico-windows-upgrade DaemonSet container.

- name (string): Name is an enum which identifies the calico-windows-upgrade DaemonSet container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named calico-windows-upgrade DaemonSet container's resources. If omitted, the calico-windows-upgrade DaemonSet will use its default value for this container's resources.

CalicoWindowsUpgradeDaemonSetPodSpec

(Appears on: CalicoWindowsUpgradeDaemonSetPodTemplateSpec)

CalicoWindowsUpgradeDaemonSetPodSpec is the calico-windows-upgrade DaemonSet's PodSpec.

- containers ([]CalicoWindowsUpgradeDaemonSetContainer, optional): Containers is a list of calico-windows-upgrade containers. If specified, this overrides the specified calico-windows-upgrade DaemonSet containers. If omitted, the calico-windows-upgrade DaemonSet will use its default values for its containers.
- affinity (Kubernetes core/v1.Affinity, optional): Affinity is a group of affinity scheduling rules for the calico-windows-upgrade pods. If specified, this overrides any affinity that may be set on the calico-windows-upgrade DaemonSet. If omitted, the calico-windows-upgrade DaemonSet will use its default value for affinity. WARNING: this field overrides the default calico-windows-upgrade DaemonSet affinity.
- nodeSelector (map[string]string, optional): NodeSelector is the calico-windows-upgrade pod's scheduling constraints. If specified, each of the key/value pairs is added to the calico-windows-upgrade DaemonSet nodeSelector, provided the key does not already exist in the object's nodeSelector. If omitted, the calico-windows-upgrade DaemonSet will use its default value for nodeSelector. WARNING: this field modifies the default calico-windows-upgrade DaemonSet nodeSelector.
- tolerations ([]Kubernetes core/v1.Toleration, optional): Tolerations is the calico-windows-upgrade pod's tolerations. If specified, this overrides any tolerations that may be set on the calico-windows-upgrade DaemonSet. If omitted, the calico-windows-upgrade DaemonSet will use its default value for tolerations. WARNING: this field overrides the default calico-windows-upgrade DaemonSet tolerations.

CalicoWindowsUpgradeDaemonSetPodTemplateSpec

(Appears on: CalicoWindowsUpgradeDaemonSetSpec)

CalicoWindowsUpgradeDaemonSetPodTemplateSpec is the calico-windows-upgrade DaemonSet's PodTemplateSpec.

- metadata (Metadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the pod's metadata.
- spec (CalicoWindowsUpgradeDaemonSetPodSpec, optional): Spec is the calico-windows-upgrade DaemonSet's PodSpec.

CalicoWindowsUpgradeDaemonSetSpec

(Appears on: CalicoWindowsUpgradeDaemonSet)

CalicoWindowsUpgradeDaemonSetSpec defines configuration for the calico-windows-upgrade DaemonSet.

- minReadySeconds (int32, optional): MinReadySeconds is the minimum number of seconds for which a newly created DaemonSet pod should be ready, without any of its containers crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the calico-windows-upgrade DaemonSet. If omitted, the calico-windows-upgrade DaemonSet will use its default value for minReadySeconds.
- template (CalicoWindowsUpgradeDaemonSetPodTemplateSpec, optional): Template describes the calico-windows-upgrade DaemonSet pod that will be created.

CertificateManagement

(Appears on: InstallationSpec)

CertificateManagement configures pods to submit a CertificateSigningRequest to the certificates.k8s.io/v1beta1 API in order to obtain TLS certificates. This feature requires that you bring your own CSR signing and approval process; otherwise pods will be stuck during initialization.

- caCert ([]byte): Certificate of the authority that signs the CertificateSigningRequests, in PEM format.
- signerName (string): When a CSR is issued to the certificates.k8s.io API, the signerName is added to the request in order to accommodate clusters with multiple signers. Must be formatted as `<my-domain>/<my-signername>`.
- keyAlgorithm (string, optional): Specifies the algorithm used by pods to generate a key pair that is associated with the X.509 certificate request. Default: RSAWithSize2048.
- signatureAlgorithm (string, optional): Specifies the algorithm used for the signature of the X.509 certificate request. Default: SHA256WithRSA.
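A sketch of a certificateManagement block inside an Installation spec, assuming an external signer; the CA certificate and signer name are placeholders:

```yaml
spec:
  certificateManagement:
    caCert: "<base64-encoded PEM CA certificate>"  # placeholder
    signerName: example.com/my-signer              # must be <my-domain>/<my-signername>
    keyAlgorithm: RSAWithSize2048                  # the documented default
    signatureAlgorithm: SHA256WithRSA              # the documented default
```

With this feature enabled, pods block during initialization until your own process signs and approves their CSRs.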

CollectProcessPathOption (string alias)

ComponentName (string alias)

(Appears on: ComponentResource)

ComponentName represents a single component. One of: Node, Typha, KubeControllers.

ComponentResource

(Appears on: InstallationSpec)

Deprecated; use the component resource configuration fields in Installation.Spec instead. The ComponentResource struct associates a ResourceRequirements with a component by name.

- componentName (ComponentName): ComponentName is an enum which identifies the component.
- resourceRequirements (Kubernetes core/v1.ResourceRequirements): ResourceRequirements allows customization of limits and requests for compute resources such as cpu and memory.
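For reference, a sketch of the deprecated componentResources form inside an Installation spec; the newer per-component override types above should be preferred:

```yaml
spec:
  componentResources:
    - componentName: Node    # one of Node, Typha, KubeControllers
      resourceRequirements:
        requests:
          cpu: 250m
```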

ConditionStatus (string alias)

(Appears on: TigeraStatusCondition)

ConditionStatus represents the status of a particular condition. A condition may be one of: True, False, Unknown.

ContainerIPForwardingType (string alias)

(Appears on: CalicoNetworkSpec)

ContainerIPForwardingType specifies whether the CNI config for container IP forwarding is enabled.

EGWDeploymentContainer

(Appears on: EgressGatewayDeploymentPodSpec)

EGWDeploymentContainer is an Egress Gateway Deployment container.

- name (string): Name is an enum which identifies the EGW Deployment container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named EGW Deployment container's resources. If omitted, the EGW Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, this value takes precedence.

EGWDeploymentInitContainer

(Appears on: EgressGatewayDeploymentPodSpec)

EGWDeploymentInitContainer is an Egress Gateway Deployment init container.

- name (string): Name is an enum which identifies the EGW Deployment init container by name.
- resources (Kubernetes core/v1.ResourceRequirements, optional): Resources allows customization of limits and requests for compute resources such as cpu and memory. If specified, this overrides the named EGW Deployment init container's resources. If omitted, the EGW Deployment will use its default value for this init container's resources. If used in conjunction with the deprecated ComponentResources, this value takes precedence.

EgressGatewayDeploymentPodSpec

(Appears on: EgressGatewayDeploymentPodTemplateSpec)

EgressGatewayDeploymentPodSpec is the Egress Gateway Deployment's PodSpec.

- initContainers ([]EGWDeploymentInitContainer, optional): InitContainers is a list of EGW init containers. If specified, this overrides the specified EGW Deployment init containers. If omitted, the EGW Deployment will use its default values for its init containers.
- containers ([]EGWDeploymentContainer, optional): Containers is a list of EGW containers. If specified, this overrides the specified EGW Deployment containers. If omitted, the EGW Deployment will use its default values for its containers.
- affinity (Kubernetes core/v1.Affinity, optional): Affinity is a group of affinity scheduling rules for the EGW pods.
- nodeSelector (map[string]string, optional): NodeSelector gives more control over the nodes on which the Egress Gateway pods will run.
- terminationGracePeriodSeconds (int64, optional): TerminationGracePeriodSeconds defines the termination grace period of the Egress Gateway pods, in seconds.
- topologySpreadConstraints ([]Kubernetes core/v1.TopologySpreadConstraint, optional): TopologySpreadConstraints defines how the Egress Gateway pods should be spread across different AZs.
- tolerations ([]Kubernetes core/v1.Toleration, optional): Tolerations is the egress gateway pod's tolerations. If specified, this overrides any tolerations that may be set on the EGW Deployment. If omitted, the EGW Deployment will use its default value for tolerations.

EgressGatewayDeploymentPodTemplateSpec

(Appears on: EgressGatewaySpec)

EgressGatewayDeploymentPodTemplateSpec is the EGW Deployment's PodTemplateSpec.

- metadata (EgressGatewayMetadata, optional): Metadata is a subset of a Kubernetes object's metadata that is added to the pod's metadata.
- spec (EgressGatewayDeploymentPodSpec, optional): Spec is the EGW Deployment's PodSpec.

EgressGatewayFailureDetection

(Appears on: EgressGatewaySpec)

EgressGatewayFailureDetection defines the fields needed for determining Egress Gateway readiness.

- healthTimeoutDataStoreSeconds (int32, optional): HealthTimeoutDataStoreSeconds defines how long Egress Gateway can fail to connect to the datastore before reporting not ready. This value must be greater than 0. Default: 90.
- icmpProbe (ICMPProbe, optional): ICMPProbe defines the outgoing ICMP probes that Egress Gateway will use to verify its upstream connection. Egress Gateway will report not ready if all fail. Timeout must be greater than interval.
- httpProbe (HTTPProbe, optional): HTTPProbe defines the outgoing HTTP probes that Egress Gateway will use to verify its upstream connection. Egress Gateway will report not ready if all fail. Timeout must be greater than interval.

EgressGatewayIPPool

(Appears on: EgressGatewaySpec)

- name (string, optional): Name is the name of the IPPool that the Egress Gateways can use.
- cidr (string, optional): CIDR is the IPPool CIDR that the Egress Gateways can use.

EgressGatewayMetadata

(Appears on: EgressGatewayDeploymentPodTemplateSpec)

EgressGatewayMetadata contains the standard Kubernetes labels and annotations fields.

- labels (map[string]string, optional): Labels is a map of string keys and values that may match replica set and service selectors. Each of these key/value pairs is added to the object's labels, provided the key does not already exist in the object's labels. If not specified, it will default to projectcalico.org/egw:[name], where [name] is the name of the Egress Gateway resource.
- annotations (map[string]string, optional): Annotations is a map of arbitrary non-identifying metadata. Each of these key/value pairs is added to the object's annotations, provided the key does not already exist in the object's annotations.

EgressGatewaySpec

(Appears on: EgressGateway)

EgressGatewaySpec defines the desired state of EgressGateway.

- replicas (int32, optional): Replicas defines how many instances of the Egress Gateway pod will run.
- ipPools ([]EgressGatewayIPPool): IPPools defines the IP pools that the Egress Gateway pods should use. Either name or CIDR must be specified, and each entry must match an existing IPPool.
- externalNetworks ([]string, optional): ExternalNetworks defines the external network names this Egress Gateway is associated with. ExternalNetworks must match existing external networks.
- logSeverity (LogLevel, optional): LogSeverity defines the logging level of the Egress Gateway. Default: Info.
- template (EgressGatewayDeploymentPodTemplateSpec, optional): Template describes the EGW Deployment pod that will be created.
- egressGatewayFailureDetection (EgressGatewayFailureDetection, optional): EgressGatewayFailureDetection is used to configure how Egress Gateway determines readiness. If both ICMP and HTTP probes are defined, one ICMP probe and one HTTP probe must succeed for the Egress Gateway to become ready. Otherwise, one of the configured ICMP or HTTP probes must succeed.
- aws (AWSEgressGateway, optional): AWS defines additional configuration options for Egress Gateways on AWS.
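Pulling these types together, here is a sketch of an EgressGateway resource on AWS; the namespace, pool name, and elastic IP are placeholders (203.0.113.10 is a documentation address):

```yaml
apiVersion: operator.tigera.io/v1
kind: EgressGateway
metadata:
  name: egress-gateway
  namespace: default       # assumed to be a namespaced resource
spec:
  replicas: 2
  logSeverity: Info        # the documented default
  ipPools:
    - name: egress-ippool  # must match an existing IPPool
  aws:
    nativeIP: Enabled      # required when elasticIPs is set
    elasticIPs:
      - 203.0.113.10
```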

EgressGatewayStatus

(Appears on: EgressGateway)

EgressGatewayStatus defines the observed state of EgressGateway.

- state (string): State provides user-readable status.
- conditions ([]Kubernetes meta/v1.Condition, optional): Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded, or other custom types.

EncapsulationType (string alias)

(Appears on: IPPool)

EncapsulationType is the type of encapsulation to use on an IP pool. One of: IPIP, VXLAN, IPIPCrossSubnet, VXLANCrossSubnet, None.

EncryptionOption (string alias)

EncryptionOption specifies the traffic encryption mode when connecting to a Syslog server. One of: None, TLS.

FIPSMode (string alias)

(Appears on: InstallationSpec)

HTTPProbe

(Appears on: EgressGatewayFailureDetection)

HTTPProbe defines the HTTP probe configuration for Egress Gateway.

- urls ([]string): URLs defines the list of HTTP probe URLs. Egress Gateway will probe each URL periodically. If all probes fail, Egress Gateway will report non-ready.
- intervalSeconds (int32, optional): IntervalSeconds defines the interval of HTTP probes. Used when URLs is non-empty. Default: 10.
- timeoutSeconds (int32, optional): TimeoutSeconds defines the timeout value of HTTP probes. Used when URLs is non-empty. Default: 30.

HostPortsType (string alias)

(Appears on: CalicoNetworkSpec)

HostPortsType specifies host port support. One of: Enabled, Disabled.

ICMPProbe

(Appears on: EgressGatewayFailureDetection)

ICMPProbe defines the ICMP probe configuration for Egress Gateway.

- ips ([]string): IPs defines the list of ICMP probe IPs. Egress Gateway will probe each IP periodically. If all probes fail, Egress Gateway will report non-ready.
- intervalSeconds (int32, optional): IntervalSeconds defines the interval of ICMP probes. Used when IPs is non-empty. Default: 5.
- timeoutSeconds (int32, optional): TimeoutSeconds defines the timeout value of ICMP probes. Used when IPs is non-empty. Default: 15.
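A sketch of how the probe types plug into egressGatewayFailureDetection on an EgressGateway spec; the probe targets are placeholders, and each timeout is kept greater than its interval as required:

```yaml
spec:
  egressGatewayFailureDetection:
    healthTimeoutDataStoreSeconds: 90       # the documented default
    icmpProbe:
      ips:
        - 192.0.2.1                         # documentation address
      intervalSeconds: 5
      timeoutSeconds: 15
    httpProbe:
      urls:
        - http://probe.example.com/health   # placeholder URL
      intervalSeconds: 10
      timeoutSeconds: 30
```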

IPAMPluginType (string alias)

(Appears on: IPAMSpec)

IPAMSpec

(Appears on: CNISpec)

IPAMSpec contains configuration for pod IP address management.

- type (IPAMPluginType): Specifies the IPAM plugin that will be used in the Calico or Calico Enterprise installation.
  - For CNI plugin Calico, this field defaults to Calico.
  - For CNI plugin GKE, this field defaults to HostLocal.
  - For CNI plugin AzureVNET, this field defaults to AzureVNET.
  - For CNI plugin AmazonVPC, this field defaults to AmazonVPC.

  The IPAM plugin is installed and configured only if the CNI plugin is set to Calico; for all other values of the CNI plugin, the plugin binaries and CNI config are a dependency that is expected to be installed separately. Default: Calico.

IPPool

(Appears on: CalicoNetworkSpec)

- cidr (string): CIDR contains the address range for the IP pool, in classless inter-domain routing format.
- encapsulation (EncapsulationType, optional): Encapsulation specifies the encapsulation type that will be used with the IP pool. Default: IPIP.
- natOutgoing (NATOutgoingType, optional): NATOutgoing specifies whether NAT will be enabled or disabled for outgoing traffic. Default: Enabled.
- nodeSelector (string, optional): NodeSelector specifies the node selector that will be set for the IP pool. Default: 'all()'.
- blockSize (int32, optional): BlockSize specifies the CIDR prefix length to use when allocating per-node IP blocks from the main IP pool CIDR. Default: 26 (IPv4), 122 (IPv6).
- disableBGPExport (bool, optional): DisableBGPExport specifies whether routes from this IP pool's CIDR are exported over BGP. Default: false.
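A sketch of a fully specified IPPool entry under calicoNetwork in an Installation spec, spelling out each documented default explicitly; the CIDR is illustrative:

```yaml
spec:
  calicoNetwork:
    ipPools:
      - cidr: 10.244.0.0/16
        encapsulation: VXLAN
        natOutgoing: Enabled
        nodeSelector: all()
        blockSize: 26
        disableBGPExport: false
```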

Image

(Appears on: ImageSetSpec)

- image (string): Image is an image that the operator deploys; instead of using the built-in tag, the operator will use the digest as the image identifier. The value should be the image name without registry, tag, or digest. For the image docker.io/calico/node:v3.17.1, it should be represented as calico/node.
- digest (string): Digest is the image identifier that will be used for the Image. The field should not include a leading @ and must be prefixed with sha256:.

ImageSetSpec

(Appears on: ImageSet)

ImageSetSpec defines the desired state of ImageSet.

- images ([]Image): Images is the list of images for which digests will be used. All images that the operator will deploy must be specified.
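A sketch of an ImageSet; the resource name convention and the digests are placeholders, and every image the operator will deploy must be listed:

```yaml
apiVersion: operator.tigera.io/v1
kind: ImageSet
metadata:
  name: calico-v3.25.0     # assumed naming convention
spec:
  images:
    - image: calico/node   # name only: no registry, tag, or digest
      digest: "sha256:<digest-of-calico-node>"    # placeholder
    - image: calico/typha
      digest: "sha256:<digest-of-calico-typha>"   # placeholder
```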

### InstallationSpec

(Appears on: Installation, InstallationStatus)

InstallationSpec defines configuration for a Calico or Calico Enterprise installation.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `variant` | ProductVariant (optional) | Variant is the product to install: one of `Calico` or `TigeraSecureEnterprise`. Default: `Calico` |
| `registry` | string (optional) | Registry is the default Docker registry used for component Docker images. If specified, the given value must end with a slash character (`/`) and all images will be pulled from this registry. If not specified, the default registries will be used. A special case value, `UseDefault`, is supported to explicitly specify that the default registries will be used. Image format: `<registry><imagePath>/<imagePrefix><imageName>:<image-tag>`. This option allows configuring the `<registry>` portion of the above format. |
| `imagePath` | string (optional) | ImagePath allows the path part of an image to be specified. If specified, the value will be used as the image path for each image. If not specified or empty, the default for each image will be used. A special case value, `UseDefault`, is supported to explicitly specify that the default image path will be used for each image. This option allows configuring the `<imagePath>` portion of the image format above. |
| `imagePrefix` | string (optional) | ImagePrefix allows the prefix part of an image to be specified. If specified, the given value will be used as a prefix on each image. If not specified or empty, no prefix will be used. A special case value, `UseDefault`, is supported to explicitly specify that the default image prefix will be used for each image. This option allows configuring the `<imagePrefix>` portion of the image format above. |
| `imagePullSecrets` | []Kubernetes core/v1.LocalObjectReference (optional) | ImagePullSecrets is an array of references to container registry pull secrets to use. These are applied to all images to be pulled. |
| `kubernetesProvider` | Provider (optional) | KubernetesProvider specifies a particular provider of the Kubernetes platform and enables provider-specific configuration. If the specified value is empty, the operator will attempt to automatically determine the current provider. If the specified value is not empty, the operator will still attempt auto-detection, but will additionally compare the auto-detected value to the specified value to confirm they match. |
| `cni` | CNISpec (optional) | CNI specifies the CNI that will be used by this installation. |
| `calicoNetwork` | CalicoNetworkSpec (optional) | CalicoNetwork specifies networking configuration options for Calico. |
| `typhaAffinity` | TyphaAffinity (optional) | Deprecated. Please use `Installation.Spec.TyphaDeployment` instead. TyphaAffinity allows configuration of node affinity characteristics for Typha pods. |
| `controlPlaneNodeSelector` | map[string]string (optional) | ControlPlaneNodeSelector is used to select control plane nodes on which to run Calico components. This is globally applied to all resources created by the operator excluding daemonsets. |
| `controlPlaneTolerations` | []Kubernetes core/v1.Toleration (optional) | ControlPlaneTolerations specify tolerations which are then globally applied to all resources created by the operator. |
| `controlPlaneReplicas` | int32 (optional) | ControlPlaneReplicas defines how many replicas of the control plane core components will be deployed. This field applies to all control plane components that support high availability. Defaults to 2. |
| `nodeMetricsPort` | int32 (optional) | NodeMetricsPort specifies which port calico/node serves Prometheus metrics on. By default, metrics are not enabled. If specified, this overrides any FelixConfiguration resources which may exist. If omitted, Prometheus metrics may still be configured through FelixConfiguration. |
| `typhaMetricsPort` | int32 (optional) | TyphaMetricsPort specifies which port calico/typha serves Prometheus metrics on. By default, metrics are not enabled. |
| `flexVolumePath` | string (optional) | FlexVolumePath optionally specifies a custom path for FlexVolume. If not specified, FlexVolume will be enabled by default. If set to `None`, FlexVolume will be disabled. The default is based on the `kubernetesProvider`. |
| `kubeletVolumePluginPath` | string (optional) | KubeletVolumePluginPath optionally specifies enablement of the Calico CSI plugin. If not specified, CSI will be enabled by default. If set to `None`, CSI will be disabled. Default: `/var/lib/kubelet` |
| `nodeUpdateStrategy` | Kubernetes apps/v1.DaemonSetUpdateStrategy (optional) | NodeUpdateStrategy can be used to customize the desired update strategy, such as the `MaxUnavailable` field. |
| `componentResources` | []ComponentResource (optional) | Deprecated. Please use `CalicoNodeDaemonSet`, `TyphaDeployment`, and `KubeControllersDeployment`. ComponentResources can be used to customize the resource requirements for each component. `Node`, `Typha`, and `KubeControllers` are supported for installations. |
| `certificateManagement` | CertificateManagement (optional) | CertificateManagement configures pods to submit a CertificateSigningRequest to the `certificates.k8s.io/v1beta1` API in order to obtain TLS certificates. This feature requires that you bring your own CSR signing and approval process; otherwise, pods will be stuck during initialization. |
| `nonPrivileged` | NonPrivilegedType (optional) | NonPrivileged configures Calico to be run in non-privileged containers as non-root users where possible. |
| `calicoNodeDaemonSet` | CalicoNodeDaemonSet | CalicoNodeDaemonSet configures the calico-node DaemonSet. If used in conjunction with the deprecated ComponentResources, then these overrides take precedence. |
| `calicoKubeControllersDeployment` | CalicoKubeControllersDeployment | CalicoKubeControllersDeployment configures the calico-kube-controllers Deployment. If used in conjunction with the deprecated ComponentResources, then these overrides take precedence. |
| `typhaDeployment` | TyphaDeployment | TyphaDeployment configures the typha Deployment. If used in conjunction with the deprecated ComponentResources or TyphaAffinity, then these overrides take precedence. |
| `calicoWindowsUpgradeDaemonSet` | CalicoWindowsUpgradeDaemonSet | CalicoWindowsUpgradeDaemonSet configures the calico-windows-upgrade DaemonSet. |
| `fipsMode` | FIPSMode (optional) | FIPSMode uses only images and features that rely on FIPS 140-2 validated cryptographic modules and standards. Default: `Disabled` |
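As a sketch of how the image-format knobs compose, the following uses an assumed private registry, mirror path, and pull secret name; with these values the operator would pull images such as `registry.example.com/my-mirror/calico/node:<tag>`:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  # registry.example.com, my-mirror/calico, and my-registry-secret are
  # assumed names for this sketch.
  registry: registry.example.com/ # must end with a slash
  imagePath: my-mirror/calico     # optional override of the path portion
  imagePullSecrets:
    - name: my-registry-secret
```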

### InstallationStatus

(Appears on: Installation)

InstallationStatus defines the observed state of the Calico or Calico Enterprise installation.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `variant` | ProductVariant | Variant is the most recently observed installed variant: one of `Calico` or `TigeraSecureEnterprise`. |
| `mtu` | int32 | MTU is the most recently observed value for the pod network MTU. This may be an explicitly configured value, or based on Calico's native auto-detection. |
| `imageSet` | string (optional) | ImageSet is the name of the ImageSet being used, if there is one. If an ImageSet is not being used, this field is not set. |
| `computed` | InstallationSpec (optional) | Computed is the final installation, including overlaid resources. |
| `calicoVersion` | string | CalicoVersion shows the current running version of Calico. CalicoVersion together with Variant is needed to know the exact version deployed. |
| `conditions` | []Kubernetes meta/v1.Condition (optional) | Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded, or other custom types. |
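To see these observed values on a running cluster, you can query the `Installation` resource directly, for example:

```bash
# Inspect the observed status, including the computed spec and Calico version.
kubectl get installation default -o yaml
```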

### KubernetesAutodetectionMethod (string alias)

(Appears on: NodeAddressAutodetection)

KubernetesAutodetectionMethod is a method of detecting an IP address based on the Kubernetes API.

One of: `NodeInternalIP`

### LinuxDataplaneOption (string alias)

(Appears on: CalicoNetworkSpec)

LinuxDataplaneOption controls which dataplane is to be used on Linux nodes.

One of: `Iptables`, `BPF`
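For example, a minimal sketch that selects the eBPF dataplane through the operator, assuming the rest of the `Installation` is left at defaults:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    linuxDataplane: BPF # one of Iptables (the default) or BPF
```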

### LogCollectionSpec

(Appears on: ApplicationLayerSpec)

| Field | Type | Description |
| ----- | ---- | ----------- |
| `collectLogs` | LogCollectionStatusType (optional) | This setting enables or disables log collection. Allowed values are `Enabled` or `Disabled`. |
| `logIntervalSeconds` | int64 (optional) | Interval in seconds for sending L7 log information for processing. Default: 5 |
| `logRequestsPerInterval` | int64 (optional) | Maximum number of unique L7 logs that are sent per `logIntervalSeconds`. Adjust this to limit the number of L7 logs sent per interval to Felix for further processing; use a negative number to ignore limits. Default: -1 |
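These fields belong to the `ApplicationLayer` resource in Calico Enterprise; a hedged sketch of enabling L7 log collection might look like the following, with values taken from the defaults above:

```yaml
apiVersion: operator.tigera.io/v1
kind: ApplicationLayer
metadata:
  name: tigera-secure
spec:
  logCollection:
    collectLogs: Enabled
    logIntervalSeconds: 5      # send L7 logs every 5 seconds (default)
    logRequestsPerInterval: -1 # negative value disables the per-interval cap (default)
```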

### LogCollectionStatusType (string alias)

(Appears on: LogCollectionSpec)

### LogLevel (string alias)

(Appears on: EgressGatewaySpec)

### Metadata

(Appears on: APIServerDeployment, APIServerDeploymentPodTemplateSpec, CalicoKubeControllersDeployment, CalicoKubeControllersDeploymentPodTemplateSpec, CalicoNodeDaemonSet, CalicoNodeDaemonSetPodTemplateSpec, CalicoWindowsUpgradeDaemonSet, CalicoWindowsUpgradeDaemonSetPodTemplateSpec, TyphaDeployment, TyphaDeploymentPodTemplateSpec)

Metadata contains the standard Kubernetes labels and annotations fields.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `labels` | map[string]string (optional) | Labels is a map of string keys and values that may match ReplicaSet and Service selectors. Each of these key/value pairs is added to the object's labels provided the key does not already exist in the object's labels. |
| `annotations` | map[string]string (optional) | Annotations is a map of arbitrary non-identifying metadata. Each of these key/value pairs is added to the object's annotations provided the key does not already exist in the object's annotations. |
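As a sketch, extra metadata can be layered onto an operator-managed workload like so; the label and annotation keys here are hypothetical:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNodeDaemonSet:
    metadata:
      labels:
        team: networking             # hypothetical label
      annotations:
        example.com/owner: platform  # hypothetical annotation
```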

### MonitorSpec

(Appears on: Monitor)

MonitorSpec defines the desired state of Tigera monitor.

### MonitorStatus

(Appears on: Monitor)

MonitorStatus defines the observed state of Tigera monitor.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `state` | string | State provides user-readable status. |
| `conditions` | []Kubernetes meta/v1.Condition (optional) | Conditions represents the latest observed set of conditions for the component. A component may be one or more of Ready, Progressing, Degraded, or other custom types. |

### MultiInterfaceMode (string alias)

(Appears on: CalicoNetworkSpec)

MultiInterfaceMode describes the method of providing multiple pod interfaces.

One of: `None`, `Multus`

### NATOutgoingType (string alias)

(Appears on: IPPool)

NATOutgoingType describes the type of outgoing NAT to use.

One of: `Enabled`, `Disabled`

### NativeIP (string alias)

(Appears on: AWSEgressGateway)

NativeIP defines if egress gateway pods should have AWS IPs. When NativeIP is enabled, the IP pools should be backed by an AWS subnet.

### NodeAddressAutodetection

(Appears on: CalicoNetworkSpec)

NodeAddressAutodetection provides configuration options for auto-detecting node addresses. At most one option can be used. If no detection option is specified, IP auto-detection will be disabled for this address family, and IPs must be specified directly on the Node resource.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `firstFound` | bool (optional) | FirstFound uses default interface matching parameters to select an interface, performing best-effort filtering based on well-known interface names. |
| `kubernetes` | KubernetesAutodetectionMethod (optional) | Kubernetes configures Calico to detect node addresses based on the Kubernetes API. |
| `interface` | string (optional) | Interface enables IP auto-detection based on interfaces that match the given regex. |
| `skipInterface` | string (optional) | SkipInterface enables IP auto-detection based on interfaces that do not match the given regex. |
| `canReach` | string (optional) | CanReach enables IP auto-detection based on which source address on the node is used to reach the specified IP or domain. |
| `cidrs` | []string | CIDRS enables IP auto-detection based on which addresses on the nodes are within one of the provided CIDRs. |
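For instance, a sketch that restricts IPv4 auto-detection to a known node subnet; the CIDR is an assumption for illustration:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  calicoNetwork:
    nodeAddressAutodetectionV4:
      cidrs:
        - 10.0.0.0/8 # assumed node subnet; at most one detection option may be set
```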

### NodeAffinity

(Appears on: TyphaAffinity)

NodeAffinity is similar to `*v1.NodeAffinity`, but allows us to limit available schedulers.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `preferredDuringSchedulingIgnoredDuringExecution` | []Kubernetes core/v1.PreferredSchedulingTerm (optional) | The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. |
| `requiredDuringSchedulingIgnoredDuringExecution` | Kubernetes core/v1.NodeSelector (optional) | WARNING: If the affinity requirements specified by this field are not met at scheduling time, the pod will NOT be scheduled onto the node. There is no fallback to other affinity rules with this setting. This may cause networking disruption or even catastrophic failure! `PreferredDuringSchedulingIgnoredDuringExecution` should be used for affinity unless there is a specific, well-understood reason to use `RequiredDuringSchedulingIgnoredDuringExecution` and you can guarantee that it will always have sufficient nodes to satisfy the requirement. NOTE: `RequiredDuringSchedulingIgnoredDuringExecution` is set by default for AKS nodes, to avoid scheduling Typhas on virtual nodes. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. |

### NonPrivilegedType (string alias)

(Appears on: InstallationSpec)

NonPrivilegedType specifies whether Calico runs in privileged or non-privileged mode.

One of: `Enabled`, `Disabled`

### OIDCType (string alias)

OIDCType defines how OIDC is configured for Tigera Enterprise. Dex should be the best option for most use cases. The Tigera option can help in specific use cases, for instance, when you are unable to configure a client secret.

One of: `Dex`, `Tigera`

### ProductVariant (string alias)

(Appears on: InstallationSpec, InstallationStatus)

ProductVariant represents the variant of the product.

One of: `Calico`, `TigeraSecureEnterprise`

### PromptType (string alias)

PromptType is a value that specifies whether the identity provider prompts the end user for re-authentication and consent.

One of: `None`, `Login`, `Consent`, `SelectAccount`

### Provider (string alias)

(Appears on: InstallationSpec)

Provider represents a particular provider or flavor of Kubernetes. Valid options are: `EKS`, `GKE`, `AKS`, `RKE2`, `OpenShift`, `DockerEnterprise`.

### StatusConditionType (string alias)

(Appears on: TigeraStatusCondition)

StatusConditionType is a type of condition that may apply to a particular component.

### TLS

| Field | Type | Description |
| ----- | ---- | ----------- |
| `secretName` | string (optional) | SecretName indicates the name of the secret in the `tigera-operator` namespace that contains the private key and certificate that the management cluster uses when it listens for incoming connections. When set to `tigera-management-cluster-connection`, Voltron will use the same cert bundle with which Guardian client certs are signed. When set to `manager-tls`, Voltron will use the same cert bundle with which the Manager UI is served. This cert bundle must be a publicly signed cert created by the user. Note that the Tigera operator will generate a self-signed `manager-tls` cert if one does not exist, and use of that cert will result in Guardian being unable to verify Voltron's identity. If changed on a running cluster with connected managed clusters, all managed clusters will disconnect, as they will no longer be able to verify Voltron's identity. To reconnect existing managed clusters, change the `tls.ca` of the managed clusters' ManagementClusterConnection resource. One of: `tigera-management-cluster-connection`, `manager-tls`. Default: `tigera-management-cluster-connection` |
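This field lives on the Calico Enterprise `ManagementCluster` resource; a hedged sketch, with a placeholder address:

```yaml
apiVersion: operator.tigera.io/v1
kind: ManagementCluster
metadata:
  name: tigera-secure
spec:
  address: mgmt.example.com:9449 # placeholder address for managed-cluster connections
  tls:
    secretName: tigera-management-cluster-connection # the default
```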

### TigeraStatusCondition

(Appears on: TigeraStatusStatus)

TigeraStatusCondition represents a condition attached to a particular component.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `type` | StatusConditionType | The type of condition. May be `Available`, `Progressing`, or `Degraded`. |
| `status` | ConditionStatus | The status of the condition. May be `True`, `False`, or `Unknown`. |
| `lastTransitionTime` | Kubernetes meta/v1.Time | The timestamp representing the start time for the current status. |
| `reason` | string | A brief reason explaining the condition. |
| `message` | string | Optionally, a detailed message providing additional context. |
| `observedGeneration` | int64 (optional) | observedGeneration represents the generation that the condition was set based upon. For instance, if generation is currently 12, but `.status.conditions[x].observedGeneration` is 9, the condition is out of date with respect to the current state of the instance. |
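These conditions surface through the `TigeraStatus` resource, so they can be inspected with standard tooling, for example:

```bash
# Summary of component availability
kubectl get tigerastatus

# Full conditions for a single component (calico is the core component name)
kubectl get tigerastatus calico -o yaml
```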

### TigeraStatusReason (string alias)

TigeraStatusReason represents the reason for a particular condition.

### TigeraStatusSpec

(Appears on: TigeraStatus)

TigeraStatusSpec defines the desired state of TigeraStatus.

### TigeraStatusStatus

(Appears on: TigeraStatus)

TigeraStatusStatus defines the observed state of TigeraStatus.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `conditions` | []TigeraStatusCondition | Conditions represents the latest observed set of conditions for this component. A component may be one or more of `Available`, `Progressing`, or `Degraded`. |

### TyphaAffinity

(Appears on: InstallationSpec)

Deprecated. Please use TyphaDeployment instead. TyphaAffinity allows configuration of node affinity characteristics for Typha pods.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `nodeAffinity` | NodeAffinity (optional) | NodeAffinity describes node affinity scheduling rules for Typha. |

### TyphaDeployment

(Appears on: InstallationSpec)

TyphaDeployment is the configuration for the typha Deployment.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `metadata` | Metadata (optional) | Metadata is a subset of a Kubernetes object's metadata that is added to the Deployment. |
| `spec` | TyphaDeploymentSpec (optional) | Spec is the specification of the typha Deployment. |

### TyphaDeploymentContainer

(Appears on: TyphaDeploymentPodSpec)

TyphaDeploymentContainer is a typha Deployment container.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `name` | string | Name is an enum which identifies the typha Deployment container by name. |
| `resources` | Kubernetes core/v1.ResourceRequirements (optional) | Resources allows customization of limits and requests for compute resources such as CPU and memory. If specified, this overrides the named typha Deployment container's resources. If omitted, the typha Deployment will use its default value for this container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. |

### TyphaDeploymentInitContainer

(Appears on: TyphaDeploymentPodSpec)

TyphaDeploymentInitContainer is a typha Deployment init container.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `name` | string | Name is an enum which identifies the typha Deployment init container by name. |
| `resources` | Kubernetes core/v1.ResourceRequirements (optional) | Resources allows customization of limits and requests for compute resources such as CPU and memory. If specified, this overrides the named typha Deployment init container's resources. If omitted, the typha Deployment will use its default value for this init container's resources. If used in conjunction with the deprecated ComponentResources, then this value takes precedence. |
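As a sketch, overriding the typha container's resources through the `Installation` resource might look like this; the container name `calico-typha` and the resource values are assumptions for illustration:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  typhaDeployment:
    spec:
      template:
        spec:
          containers:
            - name: calico-typha # assumed container name
              resources:
                requests:
                  cpu: 100m
                  memory: 128Mi
                limits:
                  memory: 256Mi
```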

### TyphaDeploymentPodSpec

(Appears on: TyphaDeploymentPodTemplateSpec)

TyphaDeploymentPodSpec is the typha Deployment's PodSpec.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `initContainers` | []TyphaDeploymentInitContainer (optional) | InitContainers is a list of typha init containers. If specified, this overrides the specified typha Deployment init containers. If omitted, the typha Deployment will use its default values for its init containers. |
| `containers` | []TyphaDeploymentContainer (optional) | Containers is a list of typha containers. If specified, this overrides the specified typha Deployment containers. If omitted, the typha Deployment will use its default values for its containers. |
| `affinity` | Kubernetes core/v1.Affinity (optional) | Affinity is a group of affinity scheduling rules for the typha pods. If specified, this overrides any affinity that may be set on the typha Deployment. If omitted, the typha Deployment will use its default value for affinity. If used in conjunction with the deprecated TyphaAffinity, then this value takes precedence. WARNING: This field will override the default calico-typha Deployment affinity. |
| `nodeSelector` | map[string]string | NodeSelector is the calico-typha pod's scheduling constraints. If specified, each of the key/value pairs is added to the calico-typha Deployment nodeSelector provided the key does not already exist in the object's nodeSelector. If omitted, the calico-typha Deployment will use its default value for nodeSelector. WARNING: This field will modify the default calico-typha Deployment nodeSelector. |
| `terminationGracePeriodSeconds` | int64 (optional) | Optional duration in seconds the pod needs to terminate gracefully. May be decreased in a delete request. The value must be a non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal until the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds. |
| `topologySpreadConstraints` | []Kubernetes core/v1.TopologySpreadConstraint (optional) | TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. The scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed. |
| `tolerations` | []Kubernetes core/v1.Toleration (optional) | Tolerations is the typha pod's tolerations. If specified, this overrides any tolerations that may be set on the typha Deployment. If omitted, the typha Deployment will use its default value for tolerations. WARNING: This field will override the default calico-typha Deployment tolerations. |
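A sketch of a pod-level override, pinning typha to Linux nodes and tolerating a hypothetical dedicated-node taint:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  typhaDeployment:
    spec:
      template:
        spec:
          nodeSelector:
            kubernetes.io/os: linux
          tolerations:
            - key: dedicated # hypothetical taint key
              operator: Equal
              value: infra
              effect: NoSchedule
```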

### TyphaDeploymentPodTemplateSpec

(Appears on: TyphaDeploymentSpec)

TyphaDeploymentPodTemplateSpec is the typha Deployment's PodTemplateSpec.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `metadata` | Metadata (optional) | Metadata is a subset of a Kubernetes object's metadata that is added to the pod's metadata. |
| `spec` | TyphaDeploymentPodSpec (optional) | Spec is the typha Deployment's PodSpec. |

### TyphaDeploymentSpec

(Appears on: TyphaDeployment)

TyphaDeploymentSpec defines configuration for the typha Deployment.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `minReadySeconds` | int32 (optional) | MinReadySeconds is the minimum number of seconds for which a newly created Deployment pod should be ready, without any of its containers crashing, for it to be considered available. If specified, this overrides any minReadySeconds value that may be set on the typha Deployment. If omitted, the typha Deployment will use its default value for minReadySeconds. |
| `template` | TyphaDeploymentPodTemplateSpec (optional) | Template describes the typha Deployment pod that will be created. |
| `strategy` | TyphaDeploymentStrategy (optional) | The deployment strategy to use to replace existing pods with new ones. |

### TyphaDeploymentStrategy

(Appears on: TyphaDeploymentSpec)

TyphaDeploymentStrategy describes how to replace existing pods with new ones. Only RollingUpdate is supported at this time, so the Type field is not exposed.

| Field | Type | Description |
| ----- | ---- | ----------- |
| `rollingUpdate` | Kubernetes apps/v1.RollingUpdateDeployment (optional) | Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate. |
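For example, a sketch tuning the typha rollout; the values are illustrative:

```yaml
apiVersion: operator.tigera.io/v1
kind: Installation
metadata:
  name: default
spec:
  typhaDeployment:
    spec:
      minReadySeconds: 10
      strategy:
        rollingUpdate:
          maxUnavailable: 1
          maxSurge: 1
```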

### WAFStatusType (string alias)

(Appears on: ApplicationLayerSpec)
    diff --git a/calico_versioned_docs/version-3.25/reference/installation/api.mdx b/calico_versioned_docs/version-3.25/reference/installation/api.mdx deleted file mode 100644 index 9cf5a8d79c..0000000000 --- a/calico_versioned_docs/version-3.25/reference/installation/api.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: Installation API reference ---- - -# Installation reference - -import API from '@site/calico/reference/installation/_api.mdx'; - -The Kubernetes resources below configure {{prodname}} installation when using the operator. Each resource is responsible for installing and configuring a different subsystem of {{prodname}} during installation. Most options can be modified on a running cluster using `kubectl`. - - diff --git a/calico_versioned_docs/version-3.25/reference/installation/config.json b/calico_versioned_docs/version-3.25/reference/installation/config.json deleted file mode 100644 index 347a060cfe..0000000000 --- a/calico_versioned_docs/version-3.25/reference/installation/config.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "hideMemberFields": [ - "TypeMeta" - ], - "hideTypePatterns": [ - "ParseError$", - "List$", - "AmazonCloudIntegration", - "MetadataAccessAllowedType", - "Authentication", - "Auth", - "AuthMethod", - "EmailVerificationType", - "Compliance", - "IntrusionDetection", - "LogCollector", - "S3StoreSpec", - "SyslogStoreSpec", - "SyslogLogType", - "LogStorage", - "Retention", - "Nodes", - "NodeSet", - "NodeSetSelectionAttribute", - "Indices", - "AdditionalLogStoreSpec", - "AdditionalLogSourceSpec", - "EksCloudwatchLogsSpec", - "SplunkStoreSpec", - "ManagementCluster", - "Manager", - "AuthType", - "GroupSearch", - "UserMatch", - "UserSearch" - ], - "externalPackages": [ - { - "typeMatchPrefix": "^k8s\\.io/apimachinery/pkg/apis/meta/v1\\.Duration$", - "docsURLTemplate": "https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Duration" - }, - { - "typeMatchPrefix": "^k8s\\.io/(api|apimachinery/pkg/apis)/", - "docsURLTemplate": "https://v1-21.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#{{lower .TypeIdentifier}}-{{arrIndex .PackageSegments -1}}-{{arrIndex .PackageSegments -2}}" - }, - { - "typeMatchPrefix": "^github\\.com/knative/pkg/apis/duck/", - "docsURLTemplate": "https://godoc.org/github.com/knative/pkg/apis/duck/{{arrIndex .PackageSegments -1}}#{{.TypeIdentifier}}" - } - ], - "typeDisplayNamePrefixOverrides": { - "k8s.io/api/": "Kubernetes ", - "k8s.io/apimachinery/pkg/apis/": "Kubernetes " - }, - "markdownDisabled": false -} diff --git a/calico_versioned_docs/version-3.25/reference/involved.mdx b/calico_versioned_docs/version-3.25/reference/involved.mdx deleted file mode 100644 index ddd59cb707..0000000000 --- a/calico_versioned_docs/version-3.25/reference/involved.mdx +++ /dev/null @@ -1,33 +0,0 @@ ---- -description: Contribute to Calico open source project. ---- - -# Getting involved - -Calico is an open source project, and we'd love you to get involved. -Whether that might be by reading and participating on our slack, -or by diving into the code to propose enhancements or integrate with -other systems. To see the options for getting involved with Calico the -project, please take a look at the following. - -## Join us on Slack - -Our [public slack](https://calicousers.slack.com) is the quickest way to get -in touch for help debugging any issues with Calico. - -## Read the source, Luke! - -All of Calico's code is on [GitHub](https://github.com/projectcalico/calico). 
## Contributing

Calico follows the "Fork & Pull" model of collaborative development, with changes being offered to the main Calico codebase via pull requests. You can contribute a fix, change, or enhancement by forking one of our repositories and making a GitHub pull request. If you're interested in doing that:

- Thanks!
- See the [GitHub docs](https://help.github.com/articles/using-pull-requests) for how to create a pull request.
- Check the [contribution and developer docs](https://github.com/projectcalico/calico/blob/master/CONTRIBUTING.md).

diff --git a/calico_versioned_docs/version-3.25/reference/kube-controllers/configuration.mdx b/calico_versioned_docs/version-3.25/reference/kube-controllers/configuration.mdx
deleted file mode 100644
index 2cf99c010c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/kube-controllers/configuration.mdx
+++ /dev/null
@@ -1,177 +0,0 @@

---
description: Calico Kubernetes controllers monitor the Kubernetes API and perform actions based on cluster state.
---

# Configuring the Calico Kubernetes controllers

import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

The {{prodname}} Kubernetes controllers are deployed in a Kubernetes cluster. The different controllers monitor the Kubernetes API and perform actions based on cluster state.

If you have installed Calico using the operator, see the [KubeControllersConfiguration](../resources/kubecontrollersconfig.mdx) resource instead.

The controllers are primarily configured through environment variables. When running the controllers as a Kubernetes pod, this is accomplished through the pod manifest `env` section.
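For example, a minimal sketch of such an `env` section in the calico-kube-controllers pod manifest, using values taken from the defaults documented below:

```yaml
env:
  - name: DATASTORE_TYPE
    value: kubernetes
  - name: ENABLED_CONTROLLERS
    value: node
  - name: LOG_LEVEL
    value: info
```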
## The {{imageNames.calico/kube-controllers}} container

The `{{imageNames.calico/kube-controllers}}` container includes the following controllers:

1. policy controller: watches Kubernetes network policies in the Kubernetes API, and syncs the policies to the datastore (etcd) as {{prodname}} network policies. Felix implements network policies in the dataplane.
1. namespace controller: watches namespaces and programs {{prodname}} profiles.
1. serviceaccount controller: watches service accounts and programs {{prodname}} profiles.
1. workloadendpoint controller: watches for changes to pod labels and updates {{prodname}} workload endpoints.
1. node controller: watches for the removal of Kubernetes nodes and removes corresponding data from {{prodname}}, and optionally watches for node updates to create and sync host endpoints for each node.

The {{prodname}} Kubernetes manifests run these controllers within a single pod in the `calico-kube-controllers` deployment.

### Configuring datastore access

The datastore type can be configured via the `DATASTORE_TYPE` environment variable. Supported values are `etcdv3` and `kubernetes`.

#### etcdv3

The {{prodname}} Kubernetes controllers support the following environment variables to configure etcd access:

| Environment | Description | Schema |
| ----------- | ----------- | ------ |
| `ETCD_ENDPOINTS` | Comma-delimited list of etcd endpoints to connect to. Example: `http://10.0.0.1:2379,http://10.0.0.2:2379`. | string |
| `ETCD_DISCOVERY_SRV` | Domain name to discover etcd endpoints via SRV records. Mutually exclusive with `ETCD_ENDPOINTS`. Example: `example.com` | string |
| `ETCD_CA_CERT_FILE` | Path to the file containing the root certificate of the CA that issued the etcd server certificate. Configures the Kubernetes controllers to trust the signature on the certificates provided by the etcd server. To disable authentication of the server by the Kubernetes controllers, set the value to `none`. | path |
| `ETCD_CERT_FILE` | Path to the file containing the client certificate issued to the Kubernetes controllers. Enables the Kubernetes controllers to participate in mutual TLS authentication and identify themselves to the etcd server. Example: `/etc/kube-controllers/cert.pem` | path |
| `ETCD_KEY_FILE` | Path to the file containing the private key of the Kubernetes controllers' client certificate. Enables the Kubernetes controllers to participate in mutual TLS authentication and identify themselves to the etcd server. Example: `/etc/kube-controllers/key.pem` | path |

The `*_FILE` variables are _paths_ to the corresponding certificates/keys. As such, when the controllers are running as a Kubernetes pod, you must ensure that the files exist within the pod. This is usually done in one of two ways:

- Mount the certificates from the host. This requires that the certificates be present on the host running the controller.
- Use Kubernetes [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) to mount the certificates into the pod as files.

#### kubernetes

When running the controllers as a Kubernetes pod, Kubernetes API access is [configured automatically][in-cluster-config] and no additional configuration is required. However, the controllers can also be configured to use an explicit [kubeconfig][kubeconfig] file override to configure API access if needed.

| Environment | Description | Schema |
| ----------- | ----------- | ------ |
| `KUBECONFIG` | Path to a Kubernetes kubeconfig file mounted within the container. | path |

### Other configuration

:::note

Whenever possible, prefer configuring the kube-controllers component using the [KubeControllersConfiguration](../resources/kubecontrollersconfig.mdx) API resource; some configuration options may not be available through environment variables.

:::

The following environment variables can be used to configure the {{prodname}} Kubernetes controllers.

| Environment | Description | Schema | Default |
| ----------- | ----------- | ------ | ------- |
| `DATASTORE_TYPE` | Which datastore type to use | etcdv3, kubernetes | kubernetes |
| `ENABLED_CONTROLLERS` | Which controllers to run | namespace, node, policy, serviceaccount, workloadendpoint | policy,namespace,serviceaccount,workloadendpoint,node |
| `LOG_LEVEL` | Minimum log level to be displayed. | debug, info, warning, error | info |
| `KUBECONFIG` | Path to a kubeconfig file for Kubernetes API access | path | |
| `SYNC_NODE_LABELS` | When enabled, Kubernetes node labels will be copied to Calico node objects. | boolean | true |
| `AUTO_HOST_ENDPOINTS` | When set to enabled, automatically create a host endpoint for each node. | enabled, disabled | disabled |
| `COMPACTION_PERIOD` | Compact the etcd database on this interval. Set to "0" to disable. | [duration](https://golang.org/pkg/time/#ParseDuration) | 10m |
## About each controller

### Node controller

The node controller has several functions depending on the datastore in use.

**Either datastore**

- Garbage collects IP addresses.
- Automatically provisions host endpoints for Kubernetes nodes.

**etcdv3 only**

- Garbage collects projectcalico.org/v3 Node resources when the Kubernetes node is deleted.
- Synchronizes labels between Kubernetes and Calico Node resources.

The node controller is not enabled by default if `ENABLED_CONTROLLERS` is not explicitly specified. However, the {{prodname}} Kubernetes manifests explicitly specify the `ENABLED_CONTROLLERS` and enable this controller within the calico-kube-controllers deployment.

This controller is valid when using either the `etcdv3` or `kubernetes` datastore types.

#### etcdv3

To enable the node controller when using `etcdv3`, perform the following two steps.

1. Enable the controller in your [KubeControllersConfiguration](../resources/kubecontrollersconfig.mdx) or add "node" to the list of enabled controllers in the environment for kube-controllers. For example: `ENABLED_CONTROLLERS=workloadendpoint,profile,policy,node`
1. Configure {{nodecontainer}} with a Kubernetes node reference by adding the following snippet to the environment section of the {{noderunning}} daemon set.

   ```yaml
   - name: CALICO_K8S_NODE_REF
     valueFrom:
       fieldRef:
         fieldPath: spec.nodeName
   ```

Set `SYNC_NODE_LABELS` to true (enabled by default) to ensure that labels on Kubernetes node resources remain in sync with labels on the corresponding {{prodname}} node resource. If both node resources specify a label with different values, the Kubernetes node resource takes precedence. Labels on the {{prodname}} resource that don't exist in the Kubernetes node will remain as is.

#### kubernetes

To enable the node controller when using `kubernetes`, enable the controller in your [KubeControllersConfiguration](../resources/kubecontrollersconfig.mdx) or set the list of enabled controllers in the environment for kube-controllers to `node`. For example: `ENABLED_CONTROLLERS=node`

### Policy controller

The policy controller syncs Kubernetes network policies to the {{prodname}} datastore. The controller must have read access to the Kubernetes API to monitor `NetworkPolicy` events.

The policy controller is enabled by default if `ENABLED_CONTROLLERS` is not explicitly specified.

This controller is only valid when using etcd as the {{prodname}} datastore.

### Workload endpoint controller

The workload endpoint controller automatically syncs Kubernetes pod label changes to the {{prodname}} datastore by updating the corresponding workload endpoints appropriately. The controller must have read access to the Kubernetes API to monitor `Pod` events.

The workload endpoint controller is enabled by default if `ENABLED_CONTROLLERS` is not explicitly specified.

This controller is only valid when using etcd as the {{prodname}} datastore.

### Namespace controller

The namespace controller syncs Kubernetes namespace label changes to the {{prodname}} datastore. The controller must have read access to the Kubernetes API to monitor `Namespace` events.

The namespace controller is enabled by default if `ENABLED_CONTROLLERS` is not explicitly specified.
This controller is only valid when using etcd as the {{prodname}} datastore.

### Service account controller

The service account controller syncs Kubernetes service account changes to the {{prodname}} datastore. The controller must have read access to the Kubernetes API to monitor `ServiceAccount` events.

The service account controller is enabled by default if `ENABLED_CONTROLLERS` is not explicitly specified.

This controller is only valid when using etcd as the {{prodname}} datastore.

[in-cluster-config]: https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
[kubeconfig]: https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/

diff --git a/calico_versioned_docs/version-3.25/reference/kube-controllers/index.mdx b/calico_versioned_docs/version-3.25/reference/kube-controllers/index.mdx
deleted file mode 100644
index 303cb85423..0000000000
--- a/calico_versioned_docs/version-3.25/reference/kube-controllers/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@

---
description: kube-controllers is a set of Kubernetes controllers for Calico
hide_table_of_contents: true
---

# kube-controllers

import DocCardList from '@theme/DocCardList';
import { useCurrentSidebarCategory } from '@docusaurus/theme-common';

<DocCardList items={useCurrentSidebarCategory().items} />

diff --git a/calico_versioned_docs/version-3.25/reference/kube-controllers/prometheus.mdx b/calico_versioned_docs/version-3.25/reference/kube-controllers/prometheus.mdx
deleted file mode 100644
index 6fc613ea29..0000000000
--- a/calico_versioned_docs/version-3.25/reference/kube-controllers/prometheus.mdx
+++ /dev/null
@@ -1,84 +0,0 @@

---
description: Review metrics for the kube-controllers component if you are using Prometheus.
---

# Prometheus metrics

kube-controllers can be configured to report a number of metrics through Prometheus. This reporting is enabled by default on port 9094. See the [configuration reference](../resources/kubecontrollersconfig.mdx) for how to change the metrics reporting configuration (or disable it completely).
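For example, a hedged sketch of a Prometheus scrape job for this endpoint; the job name and target address are placeholders, and in practice you would typically use Kubernetes service discovery instead of a static target:

```yaml
scrape_configs:
  - job_name: calico-kube-controllers # hypothetical job name
    static_configs:
      - targets: ['10.0.0.10:9094']   # placeholder pod IP; metrics port defaults to 9094
```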
## Metric reference

#### kube-controllers specific

kube-controllers exports a number of Prometheus metrics. The current set is as follows. Since some metrics may be tied to particular implementation choices inside kube-controllers, we can't make any hard guarantees that metrics will persist across releases. However, we aim not to make any spurious changes to existing metrics.

| Metric Name | Labels | Description |
| ----------- | ------ | ----------- |
| `ipam_allocations_in_use` | ippool, node | Number of Calico IP allocations currently in use by a workload or interface. |
| `ipam_allocations_borrowed` | ippool, node | Number of Calico IP allocations currently in use where the allocation was borrowed from a block affine to another node. |
| `ipam_allocations_gc_candidates` | ippool, node | Number of Calico IP allocations currently marked by the GC as potential leaks. This metric returns to zero under normal GC operation. |
| `ipam_allocations_gc_reclamations` | ippool, node | Count of Calico IP allocations that have been reclaimed by the GC. An increase of this counter corresponds with a decrease of the candidates gauge under normal operation. |
| `ipam_blocks` | ippool, node | Number of IPAM blocks. |
| `ipam_ippool_size` | ippool | Number of IP addresses in the IP Pool CIDR. |
| `ipam_blocks_per_node` | node | Number of IPAM blocks, indexed by the node to which they have affinity. Prefer `ipam_blocks` for new integrations. |
| `ipam_allocations_per_node` | node | Number of Calico IP allocations, indexed by the node on which the allocation was made. Prefer `ipam_allocations_in_use` for new integrations. |
| `ipam_allocations_borrowed_per_node` | node | Number of Calico IP allocations borrowed from a non-affine block, indexed by the node on which the allocation was made. Prefer `ipam_allocations_borrowed` for new integrations. |

Labels can be interpreted as follows:

| Label Name | Description |
| ---------- | ----------- |
| `node` | For allocation metrics, the node on which the allocation was made. For block metrics, the node for which the block has affinity. If the block has no affinity, the value will be `no_affinity`. |
| `ippool` | The IP Pool that the IPAM block occupies. If there is no IP Pool which matches the block, the value will be `no_ippool`. |

Prometheus metrics are self-documenting; with metrics turned on, `curl` can be used to list the metrics along with their help text and type information.

```bash
curl -s http://localhost:9094/metrics | head
```

#### CPU / memory metrics

kube-controllers also exports the default set of metrics that Prometheus makes available. Currently, those include:

| Name | Description |
| ---- | ----------- |
| `go_gc_duration_seconds` | A summary of the GC invocation durations. |
| `go_goroutines` | Number of goroutines that currently exist. |
| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. |
| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. |
| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. |
| `go_memstats_frees_total` | Total number of frees. |
| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. |
| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. |
| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. |
| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. |
| `go_memstats_heap_objects` | Number of allocated objects. |
| `go_memstats_heap_released_bytes_total` | Total number of heap bytes released to OS. |
| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from system. |
| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of last garbage collection. |
| `go_memstats_lookups_total` | Total number of pointer lookups. |
| `go_memstats_mallocs_total` | Total number of mallocs. |
| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. |
| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from system. |
| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. |
| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from system. |
| `go_memstats_next_gc_bytes` | Number of heap bytes when next garbage collection will take place. |
| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. |
| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. |
| `go_memstats_stack_sys_bytes` | Number of bytes obtained from system for stack allocator. |
| `go_memstats_sys_bytes` | Number of bytes obtained by system. Sum of all system allocations. |
| `process_cpu_seconds_total` | Total user and system CPU time spent in seconds. |
| `process_max_fds` | Maximum number of open file descriptors. |
| `process_open_fds` | Number of open file descriptors. |
| `process_resident_memory_bytes` | Resident memory size in bytes. |
| `process_start_time_seconds` | Start time of the process since unix epoch in seconds. |
| `process_virtual_memory_bytes` | Virtual memory size in bytes. |
| `promhttp_metric_handler_requests_in_flight` | Current number of scrapes being served. |
| `promhttp_metric_handler_requests_total` | Total number of scrapes by HTTP status code. |

diff --git a/calico_versioned_docs/version-3.25/reference/public-cloud/aws.mdx b/calico_versioned_docs/version-3.25/reference/public-cloud/aws.mdx
deleted file mode 100644
index 5cc72502e4..0000000000
--- a/calico_versioned_docs/version-3.25/reference/public-cloud/aws.mdx
+++ /dev/null
@@ -1,85 +0,0 @@

---
description: Advantages of using Calico in AWS.
---

# Amazon Web Services

{{prodname}} provides the following advantages when running in Amazon Web Services (AWS):

- **Network Policy for Containers**: {{prodname}} provides fine-grained network security policy for individual containers.
- **No Overlays**: Within each VPC subnet {{prodname}} doesn't need an overlay, which means high performance networking for your containers.
- **No 50 Node Limit**: {{prodname}} allows you to surpass the 50 node limit, which exists as a consequence of the [AWS 50 route limit](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Appendix_Limits.html#vpc-limits-route-tables) when using the VPC routing table.

## Routing traffic within a single VPC subnet

Since {{prodname}} assigns IP addresses outside the range used by AWS for EC2 instances, you must disable AWS src/dst checks on each EC2 instance in your cluster [as described in the AWS documentation](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck). This allows {{prodname}} to route traffic natively within a single VPC subnet without using an overlay or any of the limited VPC routing table entries.

## Routing traffic across different VPC subnets / VPCs

If you need to split your deployment across multiple AZs for high availability, then each AZ will have its own VPC subnet. To use {{prodname}} across multiple different VPC subnets or [peered VPCs](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html), in addition to disabling src/dst checks as described above, you must also enable IPIP encapsulation and outgoing NAT on your {{prodname}} IP pools.

See the [IP pool configuration reference](../resources/ippool.mdx) for information on how to configure {{prodname}} IP pools.

By default, {{prodname}}'s IPIP encapsulation applies to all container-to-container traffic. However, encapsulation is only required for container traffic that crosses a VPC subnet boundary. For better performance, you can configure {{prodname}} to perform IPIP encapsulation only across VPC subnet boundaries.
To enable the "CrossSubnet" IPIP feature, configure your {{prodname}} IP pool resources to enable IPIP and set the mode to "CrossSubnet".

:::note

This feature was introduced in {{prodname}} v2.1. If your deployment was created with an older version of {{prodname}}, or if you are unsure whether your deployment is configured correctly, follow the [Configuring IP-in-IP guide](../../networking/configuring/vxlan-ipip.mdx), which discusses this in more detail.

:::

The following `calicoctl` command will create or modify an IPv4 pool with CIDR 192.168.0.0/16 using IPIP mode `CrossSubnet`. Adjust the pool CIDR for your deployment.

```bash
calicoctl apply -f - << EOF
# The pool body below is reconstructed from the surrounding description;
# the pool name is illustrative. Adjust the name and CIDR for your deployment.
apiVersion: projectcalico.org/v3
kind: IPPool
metadata:
  name: ippool-ipip-cross-subnet-1
spec:
  cidr: 192.168.0.0/16
  ipipMode: CrossSubnet
  natOutgoing: true
EOF
```

diff --git a/calico_versioned_docs/version-3.25/reference/resources/bgpconfig.mdx b/calico_versioned_docs/version-3.25/reference/resources/bgpconfig.mdx
deleted file mode 100644
index 24301e861e..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/bgpconfig.mdx
+++ /dev/null
@@ -1,87 +0,0 @@

---
description: API for this Calico resource.
---

# BGP configuration

A BGP configuration resource (`BGPConfiguration`) represents BGP specific configuration options for the cluster or a specific node.

## Sample YAML

```yaml
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  logSeverityScreen: Info
  nodeToNodeMeshEnabled: true
  nodeMeshMaxRestartTime: 120s
  asNumber: 63400
  serviceClusterIPs:
    - cidr: 10.96.0.0/12
  serviceExternalIPs:
    - cidr: 104.244.42.129/32
    - cidr: 172.217.3.0/24
  listenPort: 178
  bindMode: NodeIP
  communities:
    - name: bgp-large-community
      value: 63400:300:100
  prefixAdvertisements:
    - cidr: 172.218.4.0/26
      communities:
        - bgp-large-community
        - 63400:120
```

## BGP configuration definition

### Metadata

| Field | Description | Accepted Values | Schema |
| ----- | ----------- | --------------- | ------ |
| name | Unique name to describe this resource instance. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string |

- The resource with the name `default` has a specific meaning: it contains the BGP global default configuration.
- The resources with the name `node.<nodename>` contain the node-specific overrides and will be applied to the node `<nodename>`. When deleting a node, the BGPConfiguration resource associated with the node will also be deleted. Only prefixAdvertisements, listenPort, and logSeverityScreen can be overridden this way.

### Spec

| Field | Description | Accepted Values | Schema | Default |
| ----- | ----------- | --------------- | ------ | ------- |
| logSeverityScreen | Global log level | Debug, Info, Warning, Error, Fatal | string | `Info` |
| nodeToNodeMeshEnabled | Full BGP node-to-node mesh. Only valid on the global `default` BGPConfiguration. | true, false | string | true |
| asNumber | The default local AS Number that {{prodname}} should use when speaking with BGP peers. Only valid on the global `default` BGPConfiguration; to set a per-node override, use the `bgp` field on the [Node resource](node.mdx). | A valid AS Number, may be specified in dotted notation. | integer/string | 64512 |
| serviceClusterIPs | The CIDR blocks for Kubernetes Service Cluster IPs to be advertised over BGP. Only valid on the global `default` BGPConfiguration; will be ignored otherwise. | A list of valid IPv4 or IPv6 CIDR blocks. | List of `cidr: <ip>/<prefix length>` values. | Empty List |
| serviceExternalIPs | The CIDR blocks for Kubernetes Service External IPs to be advertised over BGP. Kubernetes Service External IPs will only be advertised if they are within one of these blocks. Only valid on the global `default` BGPConfiguration; will be ignored otherwise. | A list of valid IPv4 or IPv6 CIDR blocks. | List of `cidr: <ip>/<prefix length>` values. | Empty List |
| serviceLoadBalancerIPs | The CIDR blocks for Kubernetes Service status.LoadBalancer IPs to be advertised over BGP. Kubernetes LoadBalancer IPs will only be advertised if they are within one of these blocks. Only valid on the global `default` BGPConfiguration; will be ignored otherwise. | A list of valid IPv4 or IPv6 CIDR blocks. | List of `cidr: <ip>/<prefix length>` values. | Empty List |
| listenPort | The port where the BGP protocol should listen. | A valid port number. | integer | 179 |
| bindMode | Indicates whether to listen for BGP connections on all addresses (None) or only on the node's canonical IP address Node.Spec.BGP.IPvXAddress (NodeIP). If this field is changed when calico-node is already running, the change will not take effect until calico-node is manually restarted. | None, NodeIP. | string | None |
| communities | List of BGP community names and their values; communities are not advertised unless they are used in [prefixAdvertisements](#prefixadvertisements). | | List of [communities](#communities) | |
| prefixAdvertisements | List of per-prefix advertisement properties, like BGP communities. | | List of [prefixAdvertisements](#prefixadvertisements) | |
| nodeMeshPassword | BGP password for all the peerings in a full mesh configuration. | | [BGPPassword](bgppeer.mdx#bgppassword) | `nil` (no password) |
| nodeMeshMaxRestartTime | Restart time that is announced by BIRD in the BGP graceful restart capability and that specifies how long the neighbor would wait for the BGP session to re-establish after a restart before deleting stale routes in full mesh configurations. Note: extra care should be taken when changing this configuration, as it may break networking in your cluster. When not specified, BIRD uses the default value of 120 seconds. | `10s`, `120s`, `2m` etc. | [Duration string][parse-duration] | `nil` (empty config, BIRD will use the default value of `120s`) |
| ignoredInterfaces | List of network interfaces to be excluded when reading device routes. | A list of network interface names. The names can contain the wildcard character asterisk `*` to specify groups of interface names. | List of string | `nil` (no extra interfaces to be ignored) |
-
-### communities
-
-| Field | Description | Accepted Values | Schema |
-| ----- | ----------- | --------------- | ------ |
-| name  | Name or identifier for the community. This should be used in [prefixAdvertisements](#prefixadvertisements) to advertise the community value. | | string |
-| value | Standard or large BGP community value. | For standard community, value should be in `aa:nn` format, where both `aa` and `nn` are 16 bit integers.<br/>For large community, value should be in `aa:nn:mm` format, where `aa`, `nn` and `mm` are all 32 bit integers.<br/>Where `aa` is an AS Number, `nn` and `mm` are per-AS identifiers. | string |
-
-### prefixAdvertisements
-
-| Field | Description | Accepted Values | Schema |
-| ----------- | ----------------------------------------------- | --------------- | ------ |
-| cidr | CIDR for which properties should be advertised. | `cidr: XXX.XXX.XXX.XXX/XX` | string |
-| communities | BGP communities to be advertised. | Communities can be a list of either community names already defined in [communities](#communities) or community values of the format `aa:nn` or `aa:nn:mm`.<br/>For standard community, value should be in `aa:nn` format, where both `aa` and `nn` are 16 bit integers.<br/>For large community, value should be in `aa:nn:mm` format, where `aa`, `nn` and `mm` are all 32 bit integers.<br/>Where `aa` is an AS Number, `nn` and `mm` are per-AS identifiers. | List of string |
-
-## Supported operations
-
-| Datastore type | Create | Delete | Delete (Global `default`) | Update | Get/List | Notes |
-| --------------------- | ------ | ------ | ------------------------- | ------ | -------- | ----- |
-| etcdv3 | Yes | Yes | No | Yes | Yes | |
-| Kubernetes API server | Yes | Yes | No | Yes | Yes | |
diff --git a/calico_versioned_docs/version-3.25/reference/resources/bgppeer.mdx b/calico_versioned_docs/version-3.25/reference/resources/bgppeer.mdx
deleted file mode 100644
index a7ddc5c924..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/bgppeer.mdx
+++ /dev/null
@@ -1,119 +0,0 @@
----
-description: API for this Calico resource.
----
-
-# BGP peer
-
-import Selectors from '@site/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx';
-
-A BGP peer resource (`BGPPeer`) represents a remote BGP peer with
-which the node(s) in a {{prodname}} cluster will peer.
-Configuring BGP peers allows you to peer a {{prodname}} network
-with your datacenter fabric (e.g. ToR). For more
-information on cluster layouts, see {{prodname}}'s documentation on
-[{{prodname}} over IP fabrics](../architecture/design/l3-interconnect-fabric.mdx).
-
-## Sample YAML
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: some.name
-spec:
-  node: rack1-host1
-  peerIP: 192.168.1.1
-  asNumber: 63400
-```
-
-## BGP peer definition
-
-### Metadata
-
-| Field | Description | Accepted Values | Schema |
-| ----- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ |
-| name | Unique name to describe this resource instance. Must be specified. | Alphanumeric string with optional `.`, `_`, or `-`. | string |
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| ----- | ----------- | --------------- | ------ | ------- |
-| node | If specified, the scope is node level, otherwise the scope is global. | The hostname of the node to which this peer applies. | string | |
-| peerIP | The IP address of this peer and an optional port number. If the port number is not set, and the peer is a Calico node with `listenPort` set, then `listenPort` is used. | Valid IPv4 or IPv6 address. If the port number is set, use the `IPv4:port` or `[IPv6]:port` format. | string | |
-| asNumber | The remote AS Number of the peer. | A valid AS Number, may be specified in dotted notation. | integer/string | |
-| nodeSelector | Selector for the nodes that should have this peering. When this is set, the `node` field must be empty. | | [selector](#selectors) | |
-| peerSelector | Selector for the remote nodes to peer with. When this is set, the `peerIP` and `asNumber` fields must be empty. | | [selector](#selectors) | |
-| keepOriginalNextHop | Maintain and forward the original next hop BGP route attribute to a specific Peer within a different AS.
| | boolean | -| password | BGP password for the peerings generated by this BGPPeer resource. | | [BGPPassword](#bgppassword) | `nil` (no password) | -| sourceAddress | Specifies whether and how to configure a source address for the peerings generated by this BGPPeer resource. Default value "UseNodeIP" means to configure the node IP as the source address. "None" means not to configure a source address. | "UseNodeIP", "None" | string | "UseNodeIP" | -| maxRestartTime | Restart time that is announced by BIRD in the BGP graceful restart capability and that specifies how long the neighbor would wait for the BGP session to re-establish after a restart before deleting stale routes. Note: extra care should be taken when changing this configuration, as it may break networking in your cluster. When not specified, BIRD uses the default value of 120 seconds. | `10s`, `120s`, `2m` etc. | [Duration string][parse-duration] | `nil` (empty config, BIRD will use the default value of `120s`) | -| numAllowedLocalASNumbers | The number of local AS numbers to allow in the AS path for received routes. This disables BGP loop prevention and should only be used if necessary. | | integer | `nil` (BIRD will default to 0 meaning no change to loop prevention behavior) | -| ttlSecurity | Enables the generalized TTL security mechanism (GTSM) which protects against spoofed packets by ignoring received packets with a smaller than expected TTL value. The provided value is the number of hops (edges) between the peers. | 0 - 255 | 8-bit integer | `nil` (results in BIRD configuration `ttl security off`) | -| reachableBy | Adds a static route that may be needed to connect to a peer. In some cases, not having a static route for BGP peering results in route flapping. By adding the address of the gateway that the peer is connected to, a static route is added to prevent route flapping. | The address of the gateway that the peer is connected to | string | | - -:::tip - -The cluster-wide default local AS number used when speaking with a peer is controlled by the -[BGPConfiguration resource](bgpconfig.mdx). That value can be overridden per-node by using the `bgp` field of -the [node resource](node.mdx). - -::: - -:::note - -If you want to use `BGPPeer` resources to configure BGP sessions between nodes within the cluster - for example, to set non-default BGPPeer parameters - you must disable node-to-node mesh by setting the `nodeToNodeMeshEnabled` parameter to false in the [BGPConfiguration](./bgpconfig) resource. - -::: - -### BGPPassword - -:::note - -BGP passwords must be 80 characters or fewer. If a password longer than that -is configured, the BGP sessions with that password will fail to be established. - -::: - -| Field | Description | Schema | -| ------------ | ------------------------------- | ----------------- | -| secretKeyRef | Get the password from a secret. | [KeyRef](#keyref) | - -### KeyRef - -KeyRef tells {{prodname}} where to get a BGP password. The referenced Kubernetes -secret must be in the same namespace as the {{nodecontainer}} pod. - -| Field | Description | Schema | -| ----- | ------------------------- | ------ | -| name | The name of the secret | string | -| key | The key within the secret | string | - -## Peer scopes - -BGP Peers can exist at either global or node-specific scope. A peer's scope -determines which `{{nodecontainer}}`s will attempt to establish a BGP session with that peer. -If `{{nodecontainer}}` has a `listenPort` set in `BGPConfiguration`, it will be used in peering. 
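-
-As a minimal sketch of the two scopes described below (the names, addresses, AS number, and node label are illustrative only), a global peer omits `node` and `nodeSelector`, while a node-specific peer sets one of them:
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: global-tor-peer
-spec:
-  peerIP: 192.168.1.100
-  asNumber: 63400
----
-apiVersion: projectcalico.org/v3
-kind: BGPPeer
-metadata:
-  name: rack1-tor-peer
-spec:
-  nodeSelector: rack == 'rack1'
-  peerIP: 192.168.1.101
-  asNumber: 63400
-```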
-
-### Global peer
-
-To assign a BGP peer a global scope, omit the `node` and `nodeSelector` fields. All nodes in
-the cluster will attempt to establish BGP connections with it.
-
-### Node-specific peer
-
-A BGP peer can also be node-specific. When the `node` field is included, only the specified node
-will peer with it. When the `nodeSelector` field is included, the nodes with labels that match that selector
-will peer with it.
-
-## Supported operations
-
-| Datastore type | Create/Delete | Update | Get/List | Notes |
-| --------------------- | ------------- | ------ | -------- | ----- |
-| etcdv3 | Yes | Yes | Yes | |
-| Kubernetes API server | Yes | Yes | Yes | |
-
-## Selectors
-
-<Selectors />
-
-[parse-duration]: https://golang.org/pkg/time/#ParseDuration
diff --git a/calico_versioned_docs/version-3.25/reference/resources/blockaffinity.mdx b/calico_versioned_docs/version-3.25/reference/resources/blockaffinity.mdx
deleted file mode 100644
index 451a6bad29..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/blockaffinity.mdx
+++ /dev/null
@@ -1,31 +0,0 @@
----
-description: IP address management block affinity
----
-
-# Block affinity
-
-A block affinity resource (`BlockAffinity`) represents the affinity for an IPAM block. These are managed by Calico IPAM.
-
-## Block affinity definition
-
-### Metadata
-
-| Field | Description | Accepted Values | Schema |
-| ----- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ |
-| name | Unique name to describe this resource instance. Must be specified. | Alphanumeric string with optional `.`, `_`, or `-`. | string |
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| ------- | --------------------------------------------------------------------------- | ----------------------------------- | ------- | ------- |
-| state | State of the affinity with regard to any referenced IPAM blocks. | confirmed, pending, pendingDeletion | string | |
-| node | The node that this affinity is assigned to. | The hostname of the node | string | |
-| cidr | The CIDR range this block affinity references. | A valid IPv4 or IPv6 CIDR. | string | |
-| deleted | When set to true, clients should treat this block as if it does not exist. | true, false | boolean | `false` |
-
-## Supported operations
-
-| Datastore type | Create | Delete | Update | Get/List | Watch |
-| --------------------- | ------ | ------ | ------ | -------- | ----- |
-| etcdv3 | No | No | No | Yes | Yes |
-| Kubernetes API server | No | No | No | Yes | Yes |
diff --git a/calico_versioned_docs/version-3.25/reference/resources/caliconodestatus.mdx b/calico_versioned_docs/version-3.25/reference/resources/caliconodestatus.mdx
deleted file mode 100644
index dcd871a1c5..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/caliconodestatus.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-description: API for this Calico resource.
----
-
-# Calico node status
-
-A Calico node status resource (`CalicoNodeStatus`) represents a collection of status information for a node that {{prodname}} reports back to the user for use during troubleshooting.
-
-As of today, the status of BGP agents, BGP sessions and routes exposed to BGP agents is collected from Linux nodes only. **Windows nodes are not supported at this time.**
-The Calico node status resource is only valid when {{prodname}} BGP networking is in use.
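-
-Once a `CalicoNodeStatus` object has been created (see the sample YAML below), the collected status is written back into the same object, so it can be read with, for example (the object name here is hypothetical and matches the sample below):
-
-```bash
-kubectl get caliconodestatus my-caliconode-status-1 -o yaml
-```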
-
-### Notes
-
-Updating `CalicoNodeStatus` has a small performance impact on the CPU/memory usage of the node, and adds load to the Kubernetes API server.
-
-In our testing on a ten node, full mesh cluster, a `CalicoNodeStatus` resource was created for each node where the update interval was set to ten seconds. On each node, this resulted in an increase in CPU use of 5% of a vCPU and an increase of 4MB of memory. The control plane node recorded an increase in CPU usage of 5% of a vCPU for these 10 nodes.
-
-:::caution
-
-The implementation of `CalicoNodeStatus` is designed to handle a small number of nodes (fewer than 10 is recommended) reporting back status at the same time. If `CalicoNodeStatus` resources are created for a large number of nodes, and with a short update interval,
-the Kubernetes API server may become slower and less responsive.
-You should create a `CalicoNodeStatus` only for the node you are interested in, and for debugging purposes only. The `CalicoNodeStatus` resource should be deleted upon completion of the debugging process.
-
-:::
-
-## Sample YAML
-
-To use this function, the user creates a CalicoNodeStatus object for the node, specifying the information to collect and the interval it should be collected at. This example collects information for node "my-kadm-node-0" with an update interval of 10 seconds.
-
-```bash
-kubectl apply -f - <<EOF
-apiVersion: projectcalico.org/v3
-kind: CalicoNodeStatus
-metadata:
-  name: my-caliconode-status-1
-spec:
-  classes:
-    - Agent
-    - BGP
-    - Routes
-  node: my-kadm-node-0
-  updatePeriodSeconds: 10
-EOF
-```
-
-- The resources with the name `node.<nodename>` contain the node-specific overrides, and will be applied to the node `<nodename>`. When deleting a node the FelixConfiguration resource associated with the node will also be deleted.
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| ----- | ----------- | --------------- | ------ | ------- |
-| awsSrcDstCheck | Controls automatically setting [source-destination-check](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_NAT_Instance.html#EIP_Disable_SrcDestCheck) on an AWS EC2 instance running Felix. Setting the value to `Enable` will set the check value in the instance description to `true`. For `Disable`, the check value will be `false`. Setting must be `Disable` if you want the EC2 instance to process traffic not matching the host interface IP address. For example, an EKS cluster using Calico CNI with `VXLANMode=CrossSubnet`.
Check [IAM role and profile configuration](#aws-iam-rolepolicy-for-source-destination-check-configuration) for setting the necessary permission for this setting to work. | DoNothing, Enable, Disable | string | `DoNothing` | -| chainInsertMode | Controls whether Felix hooks the kernel's top-level iptables chains by inserting a rule at the top of the chain or by appending a rule at the bottom. `Insert` is the safe default since it prevents {{prodname}}'s rules from being bypassed. If you switch to `Append` mode, be sure that the other rules in the chains signal acceptance by falling through to the {{prodname}} rules, otherwise the {{prodname}} policy will be bypassed. | Insert, Append | string | `Insert` | -| healthTimeoutOverrides | A list of overrides for Felix's internal liveness/readiness timeouts. | see [below](#health-timeout-overrides) | List of `HealthTimeoutOverride` objects | `[]` | -| dataplaneWatchdogTimeout | Deprecated, use `healthTimeoutOverrides` instead. Timeout before the main dataplane goroutine is determined to have hung and Felix will report non-live and non-ready. Can be increased if the liveness check incorrectly fails (for example if Felix is running slowly on a heavily loaded system). | `90s`, `120s`, `10m` etc. | duration | `90s` | -| defaultEndpointToHostAction | This parameter controls what happens to traffic that goes from a workload endpoint to the host itself (after the traffic hits the endpoint egress policy). By default {{prodname}} blocks traffic from workload endpoints to the host itself with an iptables "DROP" action. If you want to allow some or all traffic from endpoint to host, set this parameter to `Return` or `Accept`. Use `Return` if you have your own rules in the iptables "INPUT" chain; {{prodname}} will insert its rules at the top of that chain, then `Return` packets to the "INPUT" chain once it has completed processing workload endpoint egress policy. Use `Accept` to unconditionally accept packets from workloads after processing workload endpoint egress policy. | Drop, Return, Accept | string | `Drop` | -| deviceRouteSourceAddress | IPv4 address to set as the source hint for routes programmed by Felix. When not set the source address for local traffic from host to workload will be determined by the kernel. | IPv4 | string | `""` | -| deviceRouteSourceAddressIPv6 | IPv6 address to set as the source hint for routes programmed by Felix. When not set the source address for local traffic from host to workload will be determined by the kernel. | IPv6 | string | `""` | -| deviceRouteProtocol | This defines the route protocol added to programmed device routes. | Protocol | int | RTPROT_BOOT | -| externalNodesCIDRList | A comma-delimited list of CIDRs of external non-calico nodes that can source tunnel traffic for acceptance by calico-nodes. | IPv4 | string | `""` | -| failsafeInboundHostPorts | UDP/TCP/SCTP protocol/cidr/port groupings that Felix will allow incoming traffic to host endpoints on irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. The default value allows SSH access, etcd, BGP, DHCP and the Kubernetes API. | | List of [ProtoPort](#protoport) |

    - protocol: tcp
      port: 22
    - protocol: udp
      port: 68
    - protocol: tcp
      port: 179
    - protocol: tcp
      port: 2379
    - protocol: tcp
      port: 2380
    - protocol: tcp
      port: 5473
    - protocol: tcp
      port: 6443
    - protocol: tcp
      port: 6666
    - protocol: tcp
      port: 6667

    | -| failsafeOutboundHostPorts | UDP/TCP/SCTP protocol/port groupings that Felix will allow outgoing traffic from host endpoints to irrespective of the security policy. This is useful to avoid accidentally cutting off a host with incorrect configuration. The default value opens etcd's standard ports to ensure that Felix does not get cut off from etcd as well as allowing DHCP, DNS, BGP and the Kubernetes API. | | List of [ProtoPort](#protoport) |

    - protocol: udp
      port: 53
    - protocol: udp
      port: 67
    - protocol: tcp
      port: 179
    - protocol: tcp
      port: 2379
    - protocol: tcp
      port: 2380
    - protocol: tcp
      port: 5473
    - protocol: tcp
      port: 6443
    - protocol: tcp
      port: 6666
    - protocol: tcp
      port: 6667

|
-| featureDetectOverride | Used to override feature detection. Values are specified in a comma-separated list with no spaces, for example: "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" or "false" will force the feature, while empty or omitted values are auto-detected. | string | string | `""` |
-| genericXDPEnabled | When enabled, Felix can fall back to the non-optimized `generic` XDP mode. This should only be used for testing since it doesn't improve performance over the non-XDP mode. | true,false | boolean | `false` |
-| interfaceExclude | A comma-separated list of interface names that should be excluded when Felix is resolving host endpoints. The default value ensures that Felix ignores Kubernetes' internal `kube-ipvs0` device. If you want to exclude multiple interface names using a single value, the list supports regular expressions. For regular expressions you must wrap the value with `/`. For example having values `/^kube/,veth1` will exclude all interfaces that begin with `kube` and also the interface `veth1`. | string | string | `kube-ipvs0` |
-| interfacePrefix | The interface name prefix that identifies workload endpoints and so distinguishes them from host endpoint interfaces. Note: in environments other than bare metal, the orchestrators configure this appropriately. For example our Kubernetes and Docker integrations set the 'cali' value, and our OpenStack integration sets the 'tap' value. | string | string | `cali` |
-| ipipEnabled | Optional, you shouldn't need to change this setting as Felix calculates if IPIP should be enabled based on the existing IP Pools. When set, this overrides whether Felix should configure an IPinIP interface on the host. When explicitly disabled in FelixConfiguration, Felix will not clean up addresses from the `tunl0` interface (use this if you need to add addresses to that interface and don't want to have them removed). | `true`, `false`, unset | optional boolean | unset |
-| ipipMTU | The MTU to set on the tunnel device. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx) | int | int | `0` |
-| ipsetsRefreshInterval | Period at which Felix re-checks the IP sets in the dataplane to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable IP sets refresh. Note: the default for this value is lower than the other refresh intervals as a workaround for a [Linux kernel bug](https://bugzilla.netfilter.org/show_bug.cgi?id=1119) that was fixed in kernel version 4.11. If you are using v4.11 or greater you may want to set this to a higher value to reduce Felix CPU usage. | `5s`, `10s`, `1m` etc. | duration | `10s` |
-| iptablesFilterAllowAction | This parameter controls what happens to traffic that is accepted by a Felix policy chain in the iptables filter table (i.e. a normal policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. | Accept, Return | string | `Accept` |
-| iptablesBackend | This parameter controls which variant of iptables Felix uses. If using Felix on a system that uses the nftables-backed iptables binaries, set this to `nft`. | Legacy, nft, Auto | string | `Auto` |
-| iptablesLockFilePath | Location of the iptables lock file. You may need to change this if the lock file is not in its standard location (for example if you have mapped it into Felix's container at a different path).
| string | string | `/run/xtables.lock` |
-| iptablesLockProbeInterval | Time that Felix will wait between attempts to acquire the iptables lock if it is not available. Lower values make Felix more responsive when the lock is contended, but use more CPU. | `5s`, `10s`, `1m` etc. | duration | `50ms` |
-| iptablesLockTimeout | Time that Felix will wait for the iptables lock, or 0, to disable. To use this feature, Felix must share the iptables lock file with all other processes that also take the lock. When running Felix inside a container, this requires the /run directory of the host to be mounted into the {{nodecontainer}} or calico/felix container. | `5s`, `10s`, `1m` etc. | duration | `0` (Disabled) |
-| iptablesMangleAllowAction | This parameter controls what happens to traffic that is accepted by a Felix policy chain in the iptables mangle table (i.e. a pre-DNAT policy chain). The default will immediately `Accept` the traffic. Use `Return` to send the traffic back up to the system chains for further processing. | Accept, Return | string | `Accept` |
-| iptablesMarkMask | Mask that Felix selects its IPTables Mark bits from. Should be a 32 bit hexadecimal number with at least 8 bits set, none of which clash with any other mark bits in use on the system. | netmask | netmask | `0xff000000` |
-| iptablesNATOutgoingInterfaceFilter | This parameter can be used to limit the host interfaces on which Calico will apply SNAT to traffic leaving a Calico IPAM pool with "NAT outgoing" enabled. This can be useful if you have a main data interface, where traffic should be SNATted, and a secondary device (such as the docker bridge) which is local to the host and doesn't require SNAT. This parameter uses the iptables interface matching syntax, which allows `+` as a wildcard. Most users will not need to set this. Example: if your data interfaces are eth0 and eth1 and you want to exclude the docker bridge, you could set this to `eth+` | string | string | `""` |
-| iptablesPostWriteCheckInterval | Period after Felix has done a write to the dataplane that it schedules an extra read back to check the write was not clobbered by another process. This should only occur if another application on the system doesn't respect the iptables lock. | `5s`, `10s`, `1m` etc. | duration | `1s` |
-| iptablesRefreshInterval | Period at which Felix re-checks all iptables state to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable iptables refresh. | `5s`, `10s`, `1m` etc. | duration | `90s` |
-| ipv6Support | IPv6 support for Felix | true, false | boolean | `true` |
-| kubeNodePortRanges | This parameter holds a list of port ranges used for service node ports. Only used if Felix detects kube-proxy running in IPVS mode. | A list of strings | A list of port ranges | `30000:32767` |
-| logFilePath | The full path to the Felix log. Set to `none` to disable file logging. | string | string | `/var/log/calico/felix.log` |
-| logPrefix | The log prefix that Felix uses when rendering LOG rules. | string | string | `calico-packet` |
-| logSeverityFile | The log severity above which logs are sent to the log file. | Same as `logSeveritySys` | string | `Info` |
-| logSeverityScreen | The log severity above which logs are sent to stdout. | Same as `logSeveritySys` | string | `Info` |
-| logSeveritySys | The log severity above which logs are sent to the syslog. Set to `none` for no logging to syslog.
| Debug, Info, Warning, Error, Fatal | string | `Info` |
-| logDebugFilenameRegex | Controls which source code files have their Debug log output included in the logs. Only logs from files with names that match the given regular expression are included. The filter only applies to Debug level logs. | regex | string | `""` |
-| maxIpsetSize | Maximum size for the ipsets used by Felix. Should be set to a number that is greater than the maximum number of IP addresses that are ever expected in a selector. | int | int | `1048576` |
-| metadataAddr | The IP address or domain name of the server that can answer VM queries for cloud-init metadata. In OpenStack, this corresponds to the machine running nova-api (or in Ubuntu, nova-api-metadata). A value of `none` (case insensitive) means that Felix should not set up any NAT rule for the metadata path. | IPv4, hostname, none | string | `127.0.0.1` |
-| metadataPort | The port of the metadata server. This, combined with global.MetadataAddr (if not 'None'), is used to set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. In most cases this should not need to be changed. | int | int | `8775` |
-| natOutgoingAddress | The source address to use for outgoing NAT. By default an iptables MASQUERADE rule determines the source address, which will use the address on the host interface the traffic leaves on. | IPV4 | string | `""` |
-| openstackRegion | The name of the region that a particular Felix belongs to. In a [multi-region Calico/OpenStack deployment](../../networking/openstack/multiple-regions.mdx), this must be configured somehow for each Felix (here in the datamodel, or in felix.cfg or the environment on each compute node), and must match the [calico] openstack_region value configured in neutron.conf on each node. | string of lower case alphanumeric characters or '-', starting and ending with an alphanumeric character | string | `""` |
-| policySyncPathPrefix | File system path where Felix notifies services of policy changes over Unix domain sockets. This is only required if you're configuring [application layer policy](../../network-policy/istio/app-layer-policy.mdx). Set to `""` to disable. | string | string | `""` |
-| prometheusGoMetricsEnabled | Set to `false` to disable Go runtime metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. | boolean | boolean | `true` |
-| prometheusMetricsEnabled | Set to `true` to enable the experimental Prometheus metrics server in Felix. | boolean | boolean | `false` |
-| prometheusMetricsHost | TCP network address that the Prometheus metrics server should bind to. | IPv4, IPv6, Hostname | string | `""` |
-| prometheusMetricsPort | TCP port that the Prometheus metrics server should bind to. | int | int | `9091` |
-| prometheusProcessMetricsEnabled | Set to `false` to disable process metrics collection, which the Prometheus client does by default. This reduces the number of metrics reported, reducing Prometheus load. | boolean | boolean | `true` |
-| removeExternalRoutes | Whether or not to remove device routes that have not been programmed by Felix. Disabling this will allow external applications to also add device routes. | bool | boolean | `true` |
-| reportingInterval | Interval at which Felix reports its status into the datastore. 0 means disabled and is correct for Kubernetes-only clusters. Must be non-zero in OpenStack deployments.
| duration | `30s` |
-| reportingTTL | Time-to-live setting for process-wide status reports. | `5s`, `10s`, `1m` etc. | duration | `90s` |
-| routeRefreshInterval | Period at which Felix re-checks the routes in the dataplane to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable route refresh. | `5s`, `10s`, `1m` etc. | duration | `90s` |
-| routeTableRange | _Deprecated in favor of `RouteTableRanges`._ Calico programs additional Linux route tables for various purposes. `RouteTableRange` specifies the indices of the route tables that Calico should use. | | [RouteTableRange](#routetablerange) | `""` |
-| routeTableRanges | Calico programs additional Linux route tables for various purposes. `RouteTableRanges` specifies a set of table index ranges that Calico should use. Deprecates and overrides `RouteTableRange`. | | [RouteTableRanges](#routetableranges) | `[{"Min": 1, "Max": 250}]` |
-| routeSyncDisabled | Set to `true` to disable Calico programming routes to local workloads. | boolean | boolean | `false` |
-| serviceLoopPrevention | When [service IP advertisement is enabled](../../networking/configuring/advertise-service-ips.mdx), prevent routing loops to service IPs that are not in use, by dropping or rejecting packets that do not get DNAT'd by kube-proxy, unless set to `Disabled`, in which case such routing loops continue to be allowed. | `Drop`, `Reject`, `Disabled` | string | `Drop` |
-| workloadSourceSpoofing | Controls whether pods can enable source IP address spoofing with the `cni.projectcalico.org/allowedSourcePrefixes` annotation. When set to `Any`, pods can use this annotation to send packets from any IP address. | `Any`, `Disabled` | string | `Disabled` |
-| sidecarAccelerationEnabled | Enable experimental acceleration between application and proxy sidecar when using [application layer policy](../../network-policy/istio/app-layer-policy.mdx). [Default: `false`] | boolean | boolean | `false` |
-| usageReportingEnabled | Reports anonymous {{prodname}} version number and cluster size to projectcalico.org. Logs warnings returned by the usage server. For example, if a significant security vulnerability has been discovered in the version of {{prodname}} being used. | boolean | boolean | `true` |
-| usageReportingInitialDelay | Minimum initial delay before first usage report. | `5s`, `10s`, `1m` etc. | duration | `300s` |
-| usageReportingInterval | The interval at which Felix does usage reports. The default is 1 day. | `5s`, `10s`, `1m` etc. | duration | `24h` |
-| vxlanEnabled | Optional, you shouldn't need to change this setting as Felix calculates if VXLAN should be enabled based on the existing IP Pools. When set, this overrides whether Felix should create the VXLAN tunnel device for VXLAN networking. | `true`, `false`, unset | optional boolean | unset |
-| vxlanMTU | MTU to use for the IPv4 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. | int | int | `0` |
-| vxlanMTUV6 | MTU to use for the IPv6 VXLAN tunnel device. Zero value means auto-detect. Also controls NodePort MTU when eBPF enabled. | int | int | `0` |
-| vxlanPort | Port to use for VXLAN traffic. A value of `0` means "use the kernel default". | int | int | `4789` |
-| vxlanVNI | Virtual network ID to use for VXLAN traffic. A value of `0` means "use the kernel default". | int | int | `4096` |
-| allowVXLANPacketsFromWorkloads | Set to `true` to allow VXLAN encapsulated traffic from workloads.
| boolean | boolean | `false` |
-| allowIPIPPacketsFromWorkloads | Set to `true` to allow IPIP encapsulated traffic from workloads. | boolean | boolean | `false` |
-| wireguardEnabled | Enable encryption for IPv4 on WireGuard-supported nodes in the cluster. When enabled, pod-to-pod traffic will be sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` |
-| wireguardEnabledV6 | Enable encryption for IPv6 on WireGuard-supported nodes in the cluster. When enabled, pod-to-pod traffic will be sent over encrypted tunnels between the nodes. | `true`, `false` | boolean | `false` |
-| wireguardInterfaceName | Name of the IPv4 WireGuard interface created by Felix. If you change the name, and want to clean up the previously-configured interface names on each node, this is a manual process. | string | string | wireguard.cali |
-| wireguardInterfaceNameV6 | Name of the IPv6 WireGuard interface created by Felix. If you change the name, and want to clean up the previously-configured interface names on each node, this is a manual process. | string | string | wg-v6.cali |
-| wireguardListeningPort | Port used by IPv4 WireGuard tunnels. Felix sets up an IPv4 WireGuard tunnel on each node specified by this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, config-file or environment variable will not work. | 1-65535 | int | 51820 |
-| wireguardListeningPortV6 | Port used by IPv6 WireGuard tunnels. Felix sets up an IPv6 WireGuard tunnel on each node specified by this port. Available for configuration only in the global FelixConfiguration resource; setting it per host, config-file or environment variable will not work. | 1-65535 | int | 51821 |
-| wireguardMTU | MTU set on the IPv4 WireGuard interface created by Felix. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). | int | int | 0 |
-| wireguardMTUV6 | MTU set on the IPv6 WireGuard interface created by Felix. Zero value means auto-detect. See [Configuring MTU](../../networking/configuring/mtu.mdx). | int | int | 0 |
-| wireguardRoutingRulePriority | WireGuard routing rule priority value set up by Felix. If you change the default value, set it to a value most appropriate to routing rules for your nodes. | 1-32765 | int | 99 |
-| wireguardHostEncryptionEnabled | **Experimental**: Adds host-namespace workload IPs to WireGuard's list of peers. Should **not** be enabled when WireGuard is enabled on a cluster's control plane node, as networking deadlock can occur. | true, false | boolean | false |
-| wireguardKeepAlive | Controls the WireGuard PersistentKeepalive option. Set to 0 to disable. [Default: 0] | `5s`, `10s`, `1m` etc. | duration | `0` |
-| xdpRefreshInterval | Period at which Felix re-checks the XDP state in the dataplane to ensure that no other process has accidentally broken {{prodname}}'s rules. Set to 0 to disable XDP refresh. | `5s`, `10s`, `1m` etc. | duration | `90s` |
-| xdpEnabled | When `bpfEnabled` is `false`: enable XDP acceleration for host endpoint policies. When `bpfEnabled` is `true`, XDP is automatically used for Calico policy where that makes sense, regardless of this setting. [Default: `true`] | true,false | boolean | `true` |
-| bpfEnabled | Enable eBPF dataplane mode. eBPF mode has some limitations, see the [HOWTO guide](../../operations/ebpf/enabling-ebpf.mdx) for more details.
| true, false | boolean | false |
-| bpfDisableUnprivileged | If true, Felix sets the kernel.unprivileged_bpf_disabled sysctl to disable unprivileged use of BPF. This ensures that unprivileged users cannot access Calico's BPF maps and cannot insert their own BPF programs to interfere with the ones that {{prodname}} installs. | true, false | boolean | true |
-| bpfLogLevel | In eBPF dataplane mode, the log level used by the BPF programs. The logs are emitted to the BPF trace pipe, accessible with the command `tc exec bpf debug`. | Off,Info,Debug | string | Off |
-| bpfDataIfacePattern | In eBPF dataplane mode, controls which interfaces Felix should attach BPF programs to catch traffic to/from the external network. This needs to match the interfaces that Calico workload traffic flows over as well as any interfaces that handle incoming traffic to NodePorts and services from outside the cluster. It should not match the workload interfaces (usually named `cali...`). | regular expression | string | ^(en.*|eth.*|tunl0$) |
-| bpfL3IfacePattern | In eBPF dataplane mode, allows listing tunnel devices like WireGuard or VXLAN (i.e., L3 devices) in addition to BPFDataIfacePattern. That is, tunnel interfaces not created by Calico, that Calico workload traffic flows over, as well as any interfaces that handle incoming traffic to NodePorts and services from outside the cluster. | regular expression | string | "" |
-| bpfConnectTimeLoadBalancingEnabled | In eBPF dataplane mode, controls whether Felix installs the connect-time load balancer. In the current release, the connect-time load balancer is required for the host to reach Kubernetes services. | true,false | boolean | true |
-| bpfExternalServiceMode | In eBPF dataplane mode, controls how traffic from outside the cluster to NodePorts and ClusterIPs is handled. In Tunnel mode, the packet is tunneled from the ingress host to the host with the backing pod and back again. In DSR mode, traffic is tunneled to the host with the backing pod and then returned directly; this requires a network that allows direct return. | Tunnel,DSR | string | Tunnel |
-| bpfKubeProxyIptablesCleanupEnabled | In eBPF dataplane mode, controls whether Felix will clean up the iptables rules created by the Kubernetes `kube-proxy`; should only be enabled if `kube-proxy` is not running. | true,false | boolean | true |
-| bpfKubeProxyMinSyncPeriod | In eBPF dataplane mode, controls the minimum time between dataplane updates for Felix's embedded `kube-proxy` implementation. | `5s`, `10s`, `1m` etc. | duration | `1s` |
-| bpfKubeProxyEndpointSlicesEnabled | In eBPF dataplane mode, controls whether Felix's embedded kube-proxy derives its services from Kubernetes' EndpointSlices resources. Using EndpointSlices is more efficient but it requires EndpointSlices support to be enabled at the Kubernetes API server. | true,false | boolean | false |
-| bpfMapSizeConntrack | In eBPF dataplane mode, controls the size of the conntrack map. | int | int | 512000 |
-| bpfMapSizeIPSets | In eBPF dataplane mode, controls the size of the ipsets map. | int | int | 1048576 |
-| bpfMapSizeNATAffinity | In eBPF dataplane mode, controls the size of the NAT affinity map. | int | int | 65536 |
-| bpfMapSizeNATFrontend | In eBPF dataplane mode, controls the size of the NAT front end map. | int | int | 65536 |
-| bpfMapSizeNATBackend | In eBPF dataplane mode, controls the size of the NAT back end map. | int | int | 262144 |
-| bpfMapSizeRoute | In eBPF dataplane mode, controls the size of the route map.
| int | int | 262144 |
-| bpfPolicyDebugEnabled | In eBPF dataplane mode, controls whether Felix will collect a policy dump for each interface. | true, false | boolean | true |
-| routeSource | Where Felix gets its routing information from for VXLAN and the BPF dataplane. The CalicoIPAM setting is more efficient because it supports route aggregation, but it only works when Calico's IPAM or host-local IPAM is in use. Use the WorkloadIPs setting if you are using Calico's VXLAN or BPF dataplane and not using Calico IPAM or host-local IPAM. | CalicoIPAM,WorkloadIPs | string | `CalicoIPAM` |
-| mtuIfacePattern | Pattern used to discover the host's interface for MTU auto-detection. | regex | string | ^((en|wl|ww|sl|ib)[opsvx].*|(eth|wlan|wwan).*) |
-
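-Felix configuration fields are typically changed by patching the `default` FelixConfiguration resource, following the same pattern as the `calicoctl patch` example later in this section. As a sketch (assuming `calicoctl` is configured for your datastore; the field values here are illustrative only):
-
-```bash
-calicoctl patch felixconfiguration default --type=merge -p \
-  '{"spec":{"bpfEnabled": true, "logSeverityScreen": "Debug"}}'
-```
-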
-`genericXDPEnabled` and `xdpRefreshInterval` are only relevant when `bpfEnabled` is `false` and
-`xdpEnabled` is `true`; in other words when XDP is being used to accelerate denial-of-service
-prevention policies in the iptables dataplane.
-
-When `bpfEnabled` is `true` the "xdp" settings all have no effect; in BPF mode the implementation of
-policy is always accelerated, using the best available BPF technology.
-
-### Health Timeout Overrides
-
-Felix has internal liveness and readiness watchdog timers that monitor its various loops.
-If a loop fails to "check in" within the allotted timeout then Felix will report non-Ready
-or non-Live on its health port (which is monitored by Kubelet in a Kubernetes system).
-If Felix reports non-Live, this can result in the Pod being restarted.
-
-In Kubernetes, if you see the calico-node Pod readiness or liveness checks fail
-intermittently, check the calico-node Pod log for a log from Felix that gives the
-overall health status (the list of components will depend on which features are enabled):
-
-```
-+---------------------------+---------+----------------+-----------------+--------+
-| COMPONENT                 | TIMEOUT | LIVENESS       | READINESS       | DETAIL |
-+---------------------------+---------+----------------+-----------------+--------+
-| CalculationGraph          | 30s     | reporting live | reporting ready |        |
-| FelixStartup              | 0s      | reporting live | reporting ready |        |
-| InternalDataplaneMainLoop | 1m30s   | reporting live | reporting ready |        |
-+---------------------------+---------+----------------+-----------------+--------+
-```
-
-If some health timeouts show as "timed out" it may help to apply an override
-using the `healthTimeoutOverrides` field:
-
-```yaml noValidation
-...
-spec:
-  healthTimeoutOverrides:
-    - name: InternalDataplaneMainLoop
-      timeout: "5m"
-    - name: CalculationGraph
-      timeout: "1m30s"
-  ...
-```
-
-A timeout value of 0 disables the timeout.
-
-### ProtoPort
-
-| Field    | Description          | Accepted Values                      | Schema |
-| -------- | -------------------- | ------------------------------------ | ------ |
-| port     | The exact port match | 0-65535                              | int    |
-| protocol | The protocol match   | tcp, udp, sctp                       | string |
-| net      | The CIDR match       | any valid CIDR (e.g. 192.168.0.0/16) | string |
-
-Keep in mind that in the following example, `net: ""` and `net: "0.0.0.0/0"` are treated the same during policy enforcement.
-
-```yaml noValidation
-...
-spec:
-  failsafeInboundHostPorts:
-    - net: "192.168.1.1/32"
-      port: 22
-      protocol: tcp
-    - net: ""
-      port: 67
-      protocol: udp
-  failsafeOutboundHostPorts:
-    - net: "0.0.0.0/0"
-      port: 67
-      protocol: udp
-...
-```
-
-### RouteTableRange
-
-The `RouteTableRange` option is now deprecated in favor of [RouteTableRanges](#routetableranges).
-
-| Field | Description          | Accepted Values | Schema |
-| ----- | -------------------- | --------------- | ------ |
-| min   | Minimum index to use | 1-250           | int    |
-| max   | Maximum index to use | 1-250           | int    |
-
-### RouteTableRanges
-
-`RouteTableRanges` is a list of `RouteTableRange` objects:
-
-| Field | Description          | Accepted Values | Schema |
-| ----- | -------------------- | --------------- | ------ |
-| min   | Minimum index to use | 1 - 4294967295  | int    |
-| max   | Maximum index to use | 1 - 4294967295  | int    |
-
-Each item in the `RouteTableRanges` list designates a range of routing tables available to Calico. By default, Calico will use a single range of `1-250`. If a range spans Linux's reserved table range (`253-255`) then those tables are automatically excluded from the list.
It's possible that other table ranges may also be reserved by third-party systems unknown to Calico. In that case, multiple ranges can be defined to target tables below and above the sensitive ranges:
-
-```sh
-# target tables 65-99 and 256-1000, skipping 100-255
-calicoctl patch felixconfig default --type=merge -p '{"spec":{"routeTableRanges": [{"Min": 65, "Max": 99}, {"Min": 256, "Max": 1000}] }}'
-```
-
-_Note_: for performance reasons, the maximum total number of routing tables that Felix will accept is 65535 (2^16 - 1).
-
-Specifying both the `RouteTableRange` and `RouteTableRanges` arguments is not supported and will result in an error from the API.
-
-### AWS IAM Role/Policy for source-destination-check configuration
-
-Setting `awsSrcDstCheck` to `Disable` will automatically disable source-destination-check on EC2 instances in a cluster, provided the necessary IAM roles and policies are set. One of the policies assigned to the IAM role of cluster nodes must contain a statement similar to the following:
-
-```json
-{
-  "Effect": "Allow",
-  "Action": [
-    "ec2:DescribeInstances",
-    "ec2:ModifyNetworkInterfaceAttribute"
-  ],
-  "Resource": "*"
-}
-```
-
-If there are no policies attached to node roles containing the above statement, attach a new policy. For example, if a node role is `test-cluster-nodeinstance-role`, open the IAM role in the AWS console and, in the `Permission policies` list, add a new inline policy with the above statement to the new policy JSON definition. For detailed information, see the [AWS documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_create.html?icmpid=docs_iam_console).
-
-For an EKS cluster, the necessary IAM role and policy is available by default. No further actions are needed.
-
-## Supported operations
-
-| Datastore type | Create | Delete | Delete (Global `default`) | Update | Get/List | Notes |
-| --------------------- | ------ | ------ | ------------------------- | ------ | -------- | ----- |
-| etcdv3 | Yes | Yes | No | Yes | Yes | |
-| Kubernetes API server | Yes | Yes | No | Yes | Yes | |
diff --git a/calico_versioned_docs/version-3.25/reference/resources/globalnetworkpolicy.mdx b/calico_versioned_docs/version-3.25/reference/resources/globalnetworkpolicy.mdx
deleted file mode 100644
index d3d7b0055f..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/globalnetworkpolicy.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-description: API for this Calico resource.
----
-
-# Global network policy
-
-import Httpmatch from '@site/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx';
-
-import Servicematch from '@site/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx';
-
-import Serviceaccountmatch from '@site/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx';
-
-import Ports from '@site/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx';
-
-import SelectorScopes from '@site/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx';
-
-import Selectors from '@site/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx';
-
-import Entityrule from '@site/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx';
-
-import Icmp from '@site/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx';
-
-import Rule from '@site/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx';
-
-A global network policy resource (`GlobalNetworkPolicy`) represents an ordered set of rules which are applied
-to a collection of endpoints that match a [label selector](#selectors).
-
-`GlobalNetworkPolicy` is not a namespaced resource. `GlobalNetworkPolicy` applies to [workload endpoint resources](workloadendpoint.mdx) in all namespaces, and to [host endpoint resources](hostendpoint.mdx).
-To select a namespace in a `GlobalNetworkPolicy` selector, use
-`projectcalico.org/namespace` as the label name and the namespace name as the
-value to compare against, e.g., `projectcalico.org/namespace == "default"`.
-See the [network policy resource](networkpolicy.mdx) for namespaced network policy.
-
-`GlobalNetworkPolicy` resources can be used to define network connectivity rules between groups of {{prodname}} endpoints and host endpoints, and
-take precedence over [Profile resources](profile.mdx) if any are defined.
-
-## Sample YAML
-
-This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on
-`database` endpoints.
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-tcp-6379
-spec:
-  selector: role == 'database'
-  types:
-    - Ingress
-    - Egress
-  ingress:
-    - action: Allow
-      metadata:
-        annotations:
-          from: frontend
-          to: database
-      protocol: TCP
-      source:
-        selector: role == 'frontend'
-      destination:
-        ports:
-          - 6379
-  egress:
-    - action: Allow
-```
-
-## Definition
-
-### Metadata
-
-| Field | Description                               | Accepted Values                                      | Schema | Default |
-| ----- | ----------------------------------------- | ---------------------------------------------------- | ------ | ------- |
-| name  | The name of the network policy. Required. | Alphanumeric string with optional `.`, `_`, or `-`.  | string |         |
-
-### Spec
-
-| Field | Description | Accepted Values | Schema | Default |
-| ----- | ----------- | --------------- | ------ | ------- |
-| order | Controls the order of precedence. {{prodname}} applies the policy with the lowest value first. | | float | |
-| selector | Selects the endpoints to which this policy applies. | | [selector](#selectors) | all() |
-| serviceAccountSelector | Selects the service account(s) to which this policy applies.
Select all service accounts in the cluster with a specific name using the `projectcalico.org/name` label. | | [selector](#selectors) | all() |
-| namespaceSelector | Selects the namespace(s) to which this policy applies. Select a specific namespace by name using the `projectcalico.org/name` label. | | [selector](#selectors) | all() |
-| types | Applies the policy based on the direction of the traffic. To apply the policy to inbound traffic, set to `Ingress`. To apply the policy to outbound traffic, set to `Egress`. To apply the policy to both, set to `Ingress, Egress`. | `Ingress`, `Egress` | List of strings | Depends on presence of ingress/egress rules\* |
-| ingress | Ordered list of ingress rules applied by this policy. | | List of [Rule](#rule) | |
-| egress | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | |
-| doNotTrack\*\* | Indicates to apply the rules in this policy before any data plane connection tracking, and that packets allowed by these rules should not be tracked. | true, false | boolean | false |
-| preDNAT\*\* | Indicates to apply the rules in this policy before any DNAT. | true, false | boolean | false |
-| applyOnForward\*\* | Indicates to apply the rules in this policy on forwarded traffic as well as to locally terminated traffic. | true, false | boolean | false |
-
-\* If `types` has no value, {{prodname}} defaults as follows.
-
-> | Ingress Rules Present | Egress Rules Present | `Types` value     |
-> | --------------------- | -------------------- | ----------------- |
-> | No                    | No                   | `Ingress`         |
-> | Yes                   | No                   | `Ingress`         |
-> | No                    | Yes                  | `Egress`          |
-> | Yes                   | Yes                  | `Ingress, Egress` |
-
-\*\* The `doNotTrack`, `preDNAT`, and `applyOnForward` fields are meaningful
-only when applying policy to a [host endpoint](hostendpoint.mdx).
-
-Only one of `doNotTrack` and `preDNAT` may be set to `true` (in a given policy). If they are both `false`, or when applying the policy to a
-[workload endpoint](workloadendpoint.mdx),
-the policy is enforced after connection tracking and any DNAT.
-
-`applyOnForward` must be set to `true` if either `doNotTrack` or `preDNAT` is
-`true` because, for a given policy, any untracked rules or rules before DNAT will
-in practice apply to forwarded traffic.
-
-See [Policy for hosts](../../network-policy/hosts/index.mdx)
-for how `doNotTrack`, `preDNAT`, and `applyOnForward` can be useful for host endpoints.
-
-### Rule
-
-<Rule />
-
-### ICMP
-
-<Icmp />
-
-### EntityRule
-
-<Entityrule />
-
-### Selectors
-
-<SelectorScopes />
-
-<Selectors />
-
-### Ports
-
-<Ports />
-
-### ServiceAccountMatch
-
-<Serviceaccountmatch />
-
-### ServiceMatch
-
-<Servicematch />
-
-## Application layer policy
-
-Application layer policy is an optional feature of {{prodname}} and
-[must be enabled](../../network-policy/istio/app-layer-policy.mdx)
-to use the following match criteria.
-
-:::note
-
-Application layer policy match criteria are supported with the following restrictions.
-
-- Only ingress policy is supported. Egress policy must not contain any application layer policy match clauses.
-- Rules must have the action `Allow` if they contain application layer policy match clauses. A sketch of such a rule is shown below.
-
-:::
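-
-As a minimal sketch of an ingress rule using an application layer match (the policy name and selector are illustrative only, and application layer policy must be enabled as described above):
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkPolicy
-metadata:
-  name: allow-http-get
-spec:
-  selector: app == 'api'
-  types:
-    - Ingress
-  ingress:
-    - action: Allow
-      http:
-        methods: ['GET']
-```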
-
-### HTTPMatch
-
-<Httpmatch />
-
-## Supported operations
-
-| Datastore type | Create/Delete | Update | Get/List | Notes |
-| ------------------------ | ------------- | ------ | -------- | ----- |
-| etcdv3 | Yes | Yes | Yes | |
-| Kubernetes API datastore | Yes | Yes | Yes | |
diff --git a/calico_versioned_docs/version-3.25/reference/resources/globalnetworkset.mdx b/calico_versioned_docs/version-3.25/reference/resources/globalnetworkset.mdx
deleted file mode 100644
index 8291a180c7..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/globalnetworkset.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-description: API for this Calico resource.
----
-
-# Global network set
-
-A global network set resource (`GlobalNetworkSet`) represents an arbitrary set of IP subnetworks/CIDRs,
-allowing it to be matched by {{prodname}} policy. Network sets are useful for applying policy to traffic
-coming from (or going to) external, non-{{prodname}}, networks.
-
-The metadata for each network set includes a set of labels. When {{prodname}} is calculating the set of
-IPs that should match a source/destination selector within a
-[global network policy](globalnetworkpolicy.mdx) rule, or within a
-[network policy](networkpolicy.mdx) rule whose `namespaceSelector` includes `global()`, it includes
-the CIDRs from any network sets that match the selector.
-
-:::note
-
-Since {{prodname}} matches packets based on their source/destination IP addresses,
-{{prodname}} rules may not behave as expected if there is NAT between the {{prodname}}-enabled node and the
-networks listed in a network set. For example, in Kubernetes, incoming traffic via a service IP is
-typically SNATed by the kube-proxy before reaching the destination host so {{prodname}}'s workload
-policy will see the kube-proxy's host's IP as the source instead of the real source.
-
-:::
-
-## Sample YAML
-
-```yaml
-apiVersion: projectcalico.org/v3
-kind: GlobalNetworkSet
-metadata:
-  name: a-name-for-the-set
-  labels:
-    role: external-database
-spec:
-  nets:
-    - 198.51.100.0/28
-    - 203.0.113.0/24
-```
-
-## Global network set definition
-
-### Metadata
-
-| Field  | Description                                | Accepted Values                                    | Schema |
-| ------ | ------------------------------------------ | -------------------------------------------------- | ------ |
-| name   | The name of this network set.              | Lower-case alphanumeric with optional `-` or `.`.  | string |
-| labels | A set of labels to apply to this endpoint. |                                                    | map    |
-
-### Spec
-
-| Field | Description                                  | Accepted Values                                         | Schema | Default |
-| ----- | -------------------------------------------- | ------------------------------------------------------- | ------ | ------- |
-| nets  | The IP networks/CIDRs to include in the set. | Valid IPv4 or IPv6 CIDRs, for example "192.0.2.128/25"  | list   |         |
diff --git a/calico_versioned_docs/version-3.25/reference/resources/hostendpoint.mdx b/calico_versioned_docs/version-3.25/reference/resources/hostendpoint.mdx
deleted file mode 100644
index e56e5ae0dc..0000000000
--- a/calico_versioned_docs/version-3.25/reference/resources/hostendpoint.mdx
+++ /dev/null
@@ -1,116 +0,0 @@
----
-description: API for this Calico resource.
----
-
-# Host endpoint
-
-import Endpointport from '@site/calico_versioned_docs/version-3.25/_includes/content/_endpointport.mdx';
-
-A host endpoint resource (`HostEndpoint`) represents one or more real or virtual interfaces
-attached to a host that is running {{prodname}}.
It enforces {{prodname}} policy on -the traffic that is entering or leaving the host's default network namespace through those -interfaces. - -- A host endpoint with `interfaceName: *` represents _all_ of a host's real or virtual - interfaces. - -- A host endpoint for one specific real interface is configured by `interfaceName: <name-of-interface>`, - for example `interfaceName: eth0`, or by leaving `interfaceName` - empty and including one of the interface's IPs in `expectedIPs`. - -Each host endpoint may include a set of labels and a list of profiles that {{prodname}} -will use to apply -[policy](networkpolicy.mdx) -to the interface. - -**Default behavior of external traffic to/from host** - -If a host endpoint is created and network policy is not in place, the {{prodname}} default is to deny traffic to/from that endpoint (except for traffic allowed by failsafe rules). -For a named host endpoint (i.e. a host endpoint representing a specific interface), {{prodname}} blocks traffic only to/from the interface specified in the host endpoint. Traffic to/from other interfaces is ignored. - -:::note - -Host endpoints with `interfaceName: *` do not support [untracked policy](../../network-policy/extreme-traffic/high-connection-workloads.mdx). - -::: - -For a wildcard host endpoint (i.e. a host endpoint representing all of a host's interfaces), {{prodname}} blocks traffic to/from _all_ interfaces on the host (except for traffic allowed by failsafe rules). - -However, profiles can be used in conjunction with host endpoints to modify default behavior of external traffic to/from the host in the absence of network policy. -{{prodname}} provides a default profile resource named `projectcalico-default-allow` that consists of allow-all ingress and egress rules. -Host endpoints with the `projectcalico-default-allow` profile attached will have "allow-all" semantics instead of "deny-all" in the absence of policy. - -:::note - -If you have custom iptables rules, using host endpoints with allow-all rules (with no policies) will accept all traffic and therefore bypass those custom rules. - -::: - -:::note - -Auto host endpoints specify the `projectcalico-default-allow` profile so they behave similarly to pod workload endpoints. - -::: - -:::note - -When rendering security rules on other hosts, {{prodname}} uses the -`expectedIPs` field to resolve label selectors to IP addresses. If the `expectedIPs` field -is omitted then security rules that use labels will fail to match this endpoint. - -::: - -**Host to local workload traffic**: Traffic from a host to its workload endpoints (e.g. Kubernetes pods) is always allowed, regardless of any policy in place. This ensures that `kubelet` liveness and readiness probes always work. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: HostEndpoint -metadata: - name: some.name - labels: - type: production -spec: - interfaceName: eth0 - node: myhost - expectedIPs: - - 192.168.0.1 - - 192.168.0.2 - profiles: - - profile1 - - profile2 - ports: - - name: some-port - port: 1234 - protocol: TCP - - name: another-port - port: 5432 - protocol: UDP -``` - -## Host endpoint definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ------ | ------------------------------------------ | --------------------------------------------------- | ------ | -| name | The name of this hostEndpoint. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | -| labels | A set of labels to apply to this endpoint. 
| | map | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ------------- | -------------------------------------------------------------------------- | -------------------------- | -------------------------------------- | ------- | -| node | The name of the node where this HostEndpoint resides. | | string | -| interfaceName | Either `*` or the name of the specific interface on which to apply policy. | | string | -| expectedIPs | The expected IP addresses associated with the interface. | Valid IPv4 or IPv6 address | list | -| profiles | The list of profiles to apply to the endpoint. | | list | -| ports | List of named ports that this workload exposes. | | List of [EndpointPorts](#endpointport) | - -### EndpointPort - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API server | Yes | Yes | Yes | diff --git a/calico_versioned_docs/version-3.25/reference/resources/index.mdx b/calico_versioned_docs/version-3.25/reference/resources/index.mdx deleted file mode 100644 index 2285c6bf97..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -description: APIs for all Calico networking and network policy resources. -hide_table_of_contents: true ---- - -# Resource definitions - -import DocCardList from '@theme/DocCardList'; -import { useCurrentSidebarCategory } from '@docusaurus/theme-common'; - - diff --git a/calico_versioned_docs/version-3.25/reference/resources/ipamconfig.mdx b/calico_versioned_docs/version-3.25/reference/resources/ipamconfig.mdx deleted file mode 100644 index c8737aac6b..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/ipamconfig.mdx +++ /dev/null @@ -1,43 +0,0 @@ ---- -description: IP address management global configuration ---- - -# IPAM configuration - -An IPAM configuration resource (`IPAMConfiguration`) represents global IPAM configuration options. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPAMConfiguration -metadata: - name: default -spec: - strictAffinity: false - maxBlocksPerHost: 4 -``` - -## IPAM configuration definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------------------------------------------- | --------------- | ------ | -| name | Unique name to describe this resource instance. Required. | default | string | - -The resource is a singleton which must have the name `default`. - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------- | ------------------------------------------------------------------- | --------------- | ------ | --------- | -| strictAffinity | When StrictAffinity is true, borrowing IP addresses is not allowed. | true, false | bool | false | -| maxBlocksPerHost | The max number of blocks that can be affine to each host. 
| 0 - max(int32) | int | unlimited | - -## Supported operations - -| Datastore type | Create | Delete | Update | Get/List | -| --------------------- | ------ | ------ | ------ | -------- | -| etcdv3 | Yes | Yes | Yes | Yes | -| Kubernetes API server | Yes | Yes | Yes | Yes | diff --git a/calico_versioned_docs/version-3.25/reference/resources/ippool.mdx b/calico_versioned_docs/version-3.25/reference/resources/ippool.mdx deleted file mode 100644 index 0f846f645e..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/ippool.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# IP pool - -import Selectors from '@site/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx'; - -An IP pool resource (`IPPool`) represents a collection of IP addresses from which {{prodname}} expects -endpoint IPs to be assigned. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPPool -metadata: - name: my.ippool-1 -spec: - cidr: 10.1.0.0/16 - ipipMode: CrossSubnet - natOutgoing: true - disabled: false - nodeSelector: all() - allowedUses: - - Workload - - Tunnel -``` - -## IP pool definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | ------------------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this IPPool resource. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | --------------------------------------------- | -| cidr | IP range to use for this pool. | A valid IPv4 or IPv6 CIDR. Subnet length must be at least big enough to fit a single block (by default `/26` for IPv4 or `/122` for IPv6). Must not overlap with the Link Local range `169.254.0.0/16` or `fe80::/10`. | string | | -| blockSize | The CIDR size of allocation blocks used by this pool. Blocks are allocated on demand to hosts and are used to aggregate routes. The value can only be set when the pool is created. | 20 to 32 (inclusive) for IPv4 and 116 to 128 (inclusive) for IPv6 | int | `26` for IPv4 pools and `122` for IPv6 pools. | -| ipipMode | The mode defining when IPIP will be used. Cannot be set at the same time as `vxlanMode`. | Always, CrossSubnet, Never | string | `Never` | -| vxlanMode | The mode defining when VXLAN will be used. Cannot be set at the same time as `ipipMode`. | Always, CrossSubnet, Never | string | `Never` | -| natOutgoing | When enabled, packets sent from {{prodname}} networked containers in this pool to destinations outside of any Calico IP pools will be masqueraded. | true, false | boolean | `false` | -| disabled | When set to true, {{prodname}} IPAM will not assign addresses from this pool. | true, false | boolean | `false` | -| disableBGPExport _(since v3.21.0)_ | Disable exporting routes from this IP Pool’s CIDR over BGP. | true, false | boolean | `false` | -| nodeSelector | Selects the nodes that {{prodname}} IPAM should assign addresses from this pool to. 
| | [selector](#node-selector) | all() | -| allowedUses _(since v3.21.0)_ | Controls whether the pool will be used for automatic assignments of certain types. See [below](#allowed-uses). | Workload, Tunnel | list of strings | `["Workload", "Tunnel"]` | - -:::note - -Do not use a custom `blockSize` until **all** {{prodname}} components have been updated to a version that -supports it (at least v3.3.0). Older versions of components do not understand the field so they may corrupt the -IP pool by creating blocks of incorrect size. - -::: - -### Allowed uses - -When automatically assigning IP addresses to workloads, only pools with "Workload" in their `allowedUses` field are -consulted. Similarly, when assigning IPs for tunnel devices, only "Tunnel" pools are eligible. - -If the `allowedUses` field is not specified, it defaults to `["Workload", "Tunnel"]` for compatibility with older -versions of Calico. It is not possible to specify a pool with no allowed uses. - -The `allowedUses` field is only consulted for new allocations; changing the field has no effect on previously allocated -addresses. - -{{prodname}} supports Kubernetes [annotations that force the use of specific IP addresses](../configure-cni-plugins.mdx#requesting-a-specific-ip-address). These annotations take precedence over the `allowedUses` field. - -### IPIP - -Routing of packets using IP-in-IP will be used when the destination IP address -is in an IP Pool that has IPIP enabled. In addition, if the `ipipMode` is set to `CrossSubnet`, -{{prodname}} will only route using IP-in-IP if the IP address of the destination node is in a different -subnet. The subnet of each node is configured on the node resource (which may be automatically -determined when running the `{{nodecontainer}}` service). - -For details on configuring IP-in-IP on your deployment, please refer to -[Configuring IP-in-IP](../../networking/configuring/vxlan-ipip.mdx). - -:::note - -Setting `natOutgoing` is recommended on any IP Pool with `ipip` enabled. -When `ipip` is enabled without `natOutgoing`, routing between Workloads and -Hosts running {{prodname}} is asymmetric and may cause traffic to be filtered due to -[RPF](https://en.wikipedia.org/wiki/Reverse_path_forwarding) checks failing. - -::: - -### VXLAN - -Routing of packets using VXLAN will be used when the destination IP address -is in an IP Pool that has VXLAN enabled. In addition, if the `vxlanMode` is set to `CrossSubnet`, -{{prodname}} will only route using VXLAN if the IP address of the destination node is in a different -subnet. The subnet of each node is configured on the node resource (which may be automatically -determined when running the `{{nodecontainer}}` service). - -:::note - -Setting `natOutgoing` is recommended on any IP Pool with `vxlan` enabled. -When `vxlan` is enabled without `natOutgoing`, routing between Workloads and -Hosts running {{prodname}} is asymmetric and may cause traffic to be filtered due to -[RPF](https://en.wikipedia.org/wiki/Reverse_path_forwarding) checks failing. - -::: - -### Block sizes - -The default block sizes of `26` for IPv4 and `122` for IPv6 provide blocks of 64 addresses. This allows addresses to be allocated in groups to workloads running on the same host. By grouping addresses, fewer routes need to be exchanged between hosts and to other BGP peers. If a host allocates all of the addresses in a block then it will be allocated an additional block. If there are no more blocks available then the host can take addresses from blocks allocated to other hosts. 
Specific routes are added for the borrowed addresses, which increases route table size. - -Increasing the block size from the default (e.g., using `24` for IPv4 to give 256 addresses per block) means fewer blocks per host, and potentially fewer routes. But try to ensure that there are at least as many blocks in the pool as there are hosts. - -Reducing the block size from the default (e.g., using `28` for IPv4 to give 16 addresses per block) means more blocks per host and therefore potentially more routes. This can be beneficial if it allows the blocks to be more fairly distributed amongst the hosts. - -### Node selector - -For details on configuring IP pool node selectors, please read the -[Assign IP addresses based on topology](../../networking/ipam/assign-ip-addresses-topology.mdx) guide. - -:::tip - -To prevent an IP pool from being used automatically by {{prodname}} IPAM, while still allowing -it to be used manually for static assignments, set the `IPPool`'s `nodeSelector` to `!all()`. Since the selector -matches no nodes, the IPPool will not be used automatically and, unlike setting `disabled: true`, it can still be -used for manual assignments. - -::: - -#### Selector reference - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ----- | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API server | Yes | Yes | Yes | - -## See also - -The [`IPReservation` resource](ipreservation.mdx) allows for small parts of an IP pool to be reserved so that they will -not be used for automatic IPAM assignments. diff --git a/calico_versioned_docs/version-3.25/reference/resources/ipreservation.mdx b/calico_versioned_docs/version-3.25/reference/resources/ipreservation.mdx deleted file mode 100644 index 1be9294ec2..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/ipreservation.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# IP reservation - -An IP reservation resource (`IPReservation`) represents a collection of IP addresses that {{prodname}} should -not use when automatically assigning new IP addresses. It only applies when {{prodname}} IPAM is in use. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPReservation -metadata: - name: my-ipreservation-1 -spec: - reservedCIDRs: - - 192.168.2.3 - - 10.0.2.3/32 - - cafe:f00d::/123 -``` - -## IP reservation definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | -------------------------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this IPReservation resource. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ------------- | --------------------------------------------------------------- | -------------------------------------------------- | ------ | ------- | -| reservedCIDRs | List of IP addresses and/or networks specified in CIDR notation | List of valid IP addresses (v4 or v6) and/or CIDRs | list | | - -### Notes - -The implementation of `IPReservation`s is designed to handle reservation of a small number of IP addresses/CIDRs from -(generally much larger) IP pools. If a significant portion of an IP pool is reserved (say more than 10%) then -{{prodname}} may become significantly slower when searching for free IPAM blocks. 
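- -As a hedged illustration (a minimal sketch; the name and CIDRs below are invented for this example), a reservation that carves only a handful of addresses out of a much larger pool keeps this cost negligible: - -```yaml -apiVersion: projectcalico.org/v3 -kind: IPReservation -metadata: - name: sample-small-reservation -spec: - reservedCIDRs: - - 10.0.2.8/30 - - 10.0.7.77/32 -```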
- -Since `IPReservation` resources must be consulted for every IPAM assignment request, it's best to have one or two -`IPReservation` resources, each containing multiple addresses, rather than many `IPReservation` resources each -containing a single address. - -If an `IPReservation` is created after an IP from its range is already in use then the IP is not automatically -released back to the pool. The reservation check is only done at auto allocation time. - -{{prodname}} supports Kubernetes [annotations that force the use of specific IP addresses](../configure-cni-plugins.mdx#requesting-a-specific-ip-address). These annotations override any `IPReservation`s that -are in place. - -When Windows nodes claim blocks of IPs, they automatically assign the first three IPs -in each block and the final IP for internal purposes. These assignments cannot be blocked by an `IPReservation`. -However, if a whole IPAM block is reserved with an `IPReservation`, Windows nodes will not claim such a block. diff --git a/calico_versioned_docs/version-3.25/reference/resources/kubecontrollersconfig.mdx b/calico_versioned_docs/version-3.25/reference/resources/kubecontrollersconfig.mdx deleted file mode 100644 index ed63db5d31..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/kubecontrollersconfig.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -description: API for KubeControllersConfiguration resource. ---- - -# Kubernetes controllers configuration - -A {{prodname}} [Kubernetes controllers](../kube-controllers/configuration.mdx) configuration resource (`KubeControllersConfiguration`) represents configuration options for the {{prodname}} Kubernetes controllers. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: KubeControllersConfiguration -metadata: - name: default -spec: - logSeverityScreen: Info - healthChecks: Enabled - etcdV3CompactionPeriod: 10m - prometheusMetricsPort: 9094 - controllers: - node: - reconcilerPeriod: 5m - leakGracePeriod: 15m - syncLabels: Enabled - hostEndpoint: - autoCreate: Disabled - policy: - reconcilerPeriod: 5m - workloadEndpoint: - reconcilerPeriod: 5m - serviceAccount: - reconcilerPeriod: 5m - namespace: - reconcilerPeriod: 5m -``` - -## Kubernetes controllers configuration definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | --------------------------------------------------------- | ----------------- | ------ | -| name | Unique name to describe this resource instance. Required. | Must be `default` | string | - -- {{prodname}} automatically creates a resource named `default` containing the configuration settings. Only the name `default` is used, and only one object of this type is allowed. You can use [calicoctl](../calicoctl/overview.mdx) to view and edit these settings. - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ---------------------------------------------------------------------------------------------------------- | ----------------------------------- | --------------------------------- | ------- | -| logSeverityScreen | The log severity above which logs are sent to stdout. | Debug, Info, Warning, Error, Fatal | string | Info | -| healthChecks | Enable support for health checks. | Enabled, Disabled | string | Enabled | -| prometheusMetricsPort | Port on which to serve Prometheus metrics. | Set to 0 to disable, > 0 to enable. | TCP port | 9094 | -| etcdV3CompactionPeriod | The period between etcdv3 compaction requests. 
Only applies when using etcd as the {{prodname}} datastore. | Set to 0 to disable, > 0 to enable | [Duration string][parse-duration] | 10m | -| controllers | Enabled controllers and their settings | | [Controllers](#controllers) | | - -### Controllers - -| Field | Description | Schema | -| ---------------- | ----------------------------------------------------- | ----------------------------------------------------------------------------- | -| node | Enable and configure the node controller | omit to disable, or [NodeController](#nodecontroller) | -| policy | Enable and configure the network policy controller | omit to disable, or [PolicyController](#policycontroller) | -| workloadEndpoint | Enable and configure the workload endpoint controller | omit to disable, or [WorkloadEndpointController](#workloadendpointcontroller) | -| serviceAccount | Enable and configure the service account controller | omit to disable, or [ServiceAccountController](#serviceaccountcontroller) | -| namespace | Enable and configure the namespace controller | omit to disable, or [NamespaceController](#namespacecontroller) | - -### NodeController - -The node controller automatically cleans up configuration for nodes that no longer exist. Optionally, it can create host endpoints for all Kubernetes nodes. - -| Field | Description | Accepted Values | Schema | Default | -| ---------------- | --------------------------------------------------------------------------------- | ----------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the {{prodname}} datastore | | [Duration string][parse-duration] | 5m | -| syncLabels | When enabled, Kubernetes node labels will be copied to {{prodname}} node objects. | Enabled, Disabled | string | Enabled | -| hostEndpoint | Controls allocation of host endpoints | | [HostEndpoint](#hostendpoint) | | -| leakGracePeriod | Grace period to use when garbage collecting suspected leaked IP addresses. | | [Duration string][parse-duration] | 15m | - -### HostEndpoint - -| Field | Description | Accepted Values | Schema | Default | -| ---------- | ---------------------------------------------------------------- | ----------------- | ------ | -------- | -| autoCreate | When enabled, automatically create a host endpoint for each node | Enabled, Disabled | string | Disabled | - -### PolicyController - -The policy controller syncs Kubernetes network policies to the Calico datastore. This controller is only valid when using etcd as the {{prodname}} datastore. - -| Field | Description | Schema | Default | -| ---------------- | ---------------------------------------------------------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the {{prodname}} datastore | [Duration string][parse-duration] | 5m | - -### WorkloadEndpointController - -The workload endpoint controller automatically syncs Kubernetes pod label changes to the {{prodname}} datastore by updating the corresponding workload -endpoints appropriately. This controller is only valid when using etcd as the {{prodname}} datastore. 
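- -For example (a minimal sketch; `1m` is an arbitrary illustration rather than a recommended value), tuning only this controller's reconciliation period could look like this: - -```yaml -apiVersion: projectcalico.org/v3 -kind: KubeControllersConfiguration -metadata: - name: default -spec: - controllers: - workloadEndpoint: - reconcilerPeriod: 1m -```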
- -| Field | Description | Schema | Default | -| ---------------- | ---------------------------------------------------------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the {{prodname}} datastore | [Duration string][parse-duration] | 5m | - -### ServiceAccountController - -The service account controller syncs Kubernetes service account changes to the {{prodname}} datastore. This controller is only valid when using etcd as -the {{prodname}} datastore. - -| Field | Description | Schema | Default | -| ---------------- | ---------------------------------------------------------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the {{prodname}} datastore | [Duration string][parse-duration] | 5m | - -### NamespaceController - -The namespace controller syncs Kubernetes namespace label changes to the {{prodname}} datastore. This controller is only valid when using etcd as the -{{prodname}} datastore. - -| Field | Description | Schema | Default | -| ---------------- | ---------------------------------------------------------------- | --------------------------------- | ------- | -| reconcilerPeriod | Period to perform reconciliation with the {{prodname}} datastore | [Duration string][parse-duration] | 5m | - -## Supported operations - -| Datastore type | Create | Delete (Global `default`) | Update | Get/List | Notes | -| --------------------- | ------ | ------------------------- | ------ | -------- | ----- | -| etcdv3 | Yes | Yes | Yes | Yes | -| Kubernetes API server | Yes | Yes | Yes | Yes | - -[parse-duration]: https://golang.org/pkg/time/#ParseDuration diff --git a/calico_versioned_docs/version-3.25/reference/resources/networkpolicy.mdx b/calico_versioned_docs/version-3.25/reference/resources/networkpolicy.mdx deleted file mode 100644 index fd947d2353..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/networkpolicy.mdx +++ /dev/null @@ -1,151 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# Network policy - -import Httpmatch from '@site/calico_versioned_docs/version-3.25/_includes/content/_httpmatch.mdx'; - -import Servicematch from '@site/calico_versioned_docs/version-3.25/_includes/content/_servicematch.mdx'; - -import Serviceaccountmatch from '@site/calico_versioned_docs/version-3.25/_includes/content/_serviceaccountmatch.mdx'; - -import Ports from '@site/calico_versioned_docs/version-3.25/_includes/content/_ports.mdx'; - -import SelectorScopes from '@site/calico_versioned_docs/version-3.25/_includes/content/_selector-scopes.mdx'; - -import Selectors from '@site/calico_versioned_docs/version-3.25/_includes/content/_selectors.mdx'; - -import Entityrule from '@site/calico_versioned_docs/version-3.25/_includes/content/_entityrule.mdx'; - -import Icmp from '@site/calico_versioned_docs/version-3.25/_includes/content/_icmp.mdx'; - -import Rule from '@site/calico_versioned_docs/version-3.25/_includes/content/_rule.mdx'; - -A network policy resource (`NetworkPolicy`) represents an ordered set of rules which are applied -to a collection of endpoints that match a [label selector](#selectors). - -`NetworkPolicy` is a namespaced resource. `NetworkPolicy` in a specific namespace -only applies to [workload endpoint resources](workloadendpoint.mdx) -in that namespace. Two resources are in the same namespace if the `namespace` -value is set the same on both. 
-See [global network policy resource](globalnetworkpolicy.mdx) for non-namespaced network policy. - -`NetworkPolicy` resources can be used to define network connectivity rules between groups of {{prodname}} endpoints and host endpoints, and -take precedence over [profile resources](profile.mdx) if any are defined. - -## Sample YAML - -This sample policy allows TCP traffic from `frontend` endpoints to port 6379 on -`database` endpoints. - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkPolicy -metadata: - name: allow-tcp-6379 - namespace: production -spec: - selector: role == 'database' - types: - - Ingress - - Egress - ingress: - - action: Allow - metadata: - annotations: - from: frontend - to: database - protocol: TCP - source: - selector: role == 'frontend' - destination: - ports: - - 6379 - egress: - - action: Allow -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | --------------------------------------------------- | ------ | --------- | -| name | The name of the network policy. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ---------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- | ---------------------- | --------------------------------------------- | -| order | Controls the order of precedence. {{prodname}} applies the policy with the lowest value first. | | float | | -| selector | Selects the endpoints to which this policy applies. | | [selector](#selectors) | all() | -| types | Applies the policy based on the direction of the traffic. To apply the policy to inbound traffic, set to `Ingress`. To apply the policy to outbound traffic, set to `Egress`. To apply the policy to both, set to `Ingress, Egress`. | `Ingress`, `Egress` | List of strings | Depends on presence of ingress/egress rules\* | -| ingress | Ordered list of ingress rules applied by policy. | | List of [Rule](#rule) | | -| egress | Ordered list of egress rules applied by this policy. | | List of [Rule](#rule) | | -| serviceAccountSelector | Selects the service account(s) to which this policy applies. Select a specific service account by name using the `projectcalico.org/name` label. | | [selector](#selectors) | all() | - -\* If `types` has no value, {{prodname}} defaults as follows. - -> | Ingress Rules Present | Egress Rules Present | `Types` value | -> | --------------------- | -------------------- | ----------------- | -> | No | No | `Ingress` | -> | Yes | No | `Ingress` | -> | No | Yes | `Egress` | -> | Yes | Yes | `Ingress, Egress` | - -### Rule - - - -### ICMP - - - -### EntityRule - - - -### Selectors - - - - -### Ports - - - -### ServiceAccountMatch - - - -### ServiceMatch - - - -## Application layer policy - -Application layer policy is an optional feature of {{prodname}} and -[must be enabled](../../network-policy/istio/app-layer-policy.mdx) -to use the following match criteria. - -:::note - -Application layer policy match criteria are supported with the following restrictions. - -- Only ingress policy is supported. 
Egress policy must not contain any application layer policy match clauses. -- Rules must have the action `Allow` if they contain application layer policy match clauses. - -::: - -### HTTPMatch - - - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| ------------------------ | ------------- | ------ | -------- | ----- | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API datastore | Yes | Yes | Yes | diff --git a/calico_versioned_docs/version-3.25/reference/resources/networkset.mdx b/calico_versioned_docs/version-3.25/reference/resources/networkset.mdx deleted file mode 100644 index ec7b8ded83..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/networkset.mdx +++ /dev/null @@ -1,63 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# Network set - -A network set resource (NetworkSet) represents an arbitrary set of IP subnetworks/CIDRs, -allowing it to be matched by {{prodname}} policy. Network sets are useful for applying policy to traffic -coming from (or going to) external, non-{{prodname}}, networks. - -`NetworkSet` is a namespaced resource. A `NetworkSet` in a specific namespace -only applies to [network policies](networkpolicy.mdx) -in that namespace. Two resources are in the same namespace if the `namespace` -value is set the same on both. (See [GlobalNetworkSet](globalnetworkset.mdx) for non-namespaced network sets.) - -The metadata for each network set includes a set of labels. When {{prodname}} is calculating the set of -IPs that should match a source/destination selector within a -[network policy](networkpolicy.mdx) rule, it includes -the CIDRs from any network sets that match the selector. - -:::note - -Since {{prodname}} matches packets based on their source/destination IP addresses, -{{prodname}} rules may not behave as expected if there is NAT between the {{prodname}}-enabled node and the -networks listed in a network set. For example, in Kubernetes, incoming traffic via a service IP is -typically SNATed by the kube-proxy before reaching the destination host so {{prodname}}'s workload -policy will see the kube-proxy's host's IP as the source instead of the real source. - -For `calicoctl` commands that specify a resource type on the CLI, the following -aliases are supported (all case-insensitive): `networkset`, `networksets`, `netsets`. - -::: - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: NetworkSet -metadata: - name: external-database - namespace: staging - labels: - role: db -spec: - nets: - - 198.51.100.0/28 - - 203.0.113.0/24 -``` - -## Network set definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | ------------------------------------------------- | ------ | --------- | -| name | The name of this network set. Required. | Lower-case alphanumeric with optional `_` or `-`. | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | -| labels | A set of labels to apply to this endpoint. | | map | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ----- | -------------------------------------------- | ------------------------------------------------------ | ------ | ------- | -| nets | The IP networks/CIDRs to include in the set. 
| Valid IPv4 or IPv6 CIDRs, for example "192.0.2.128/25" | list | | diff --git a/calico_versioned_docs/version-3.25/reference/resources/node.mdx b/calico_versioned_docs/version-3.25/reference/resources/node.mdx deleted file mode 100644 index a5e4c05b07..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/node.mdx +++ /dev/null @@ -1,81 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# Node - -A node resource (`Node`) represents a node running {{prodname}}. When adding a host -to a {{prodname}} cluster, a node resource needs to be created which contains the -configuration for the `{{nodecontainer}}` instance running on the host. - -When starting a `{{nodecontainer}}` instance, the name supplied to the instance should -match the name configured in the Node resource. - -By default, starting a `{{nodecontainer}}` instance will automatically create a node resource -using the `hostname` of the compute host. - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: Node -metadata: - name: node-hostname -spec: - bgp: - asNumber: 64512 - ipv4Address: 10.244.0.1/24 - ipv6Address: 2001:db8:85a3::8a2e:370:7334/120 - ipv4IPIPTunnelAddr: 192.168.0.1 -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | -| ----- | -------------------------------- | --------------------------------------------------- | ------ | -| name | The name of this node. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------- | ---------------------------- | ------- | -| bgp | BGP configuration for this node. Omit if using {{prodname}} for policy only. | | [BGP](#bgp) | -| ipv4VXLANTunnelAddr | IPv4 address of the VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| vxlanTunnelMACAddr | MAC address of the IPv4 VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| ipv6VXLANTunnelAddr | IPv6 address of the VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| vxlanTunnelMACAddrV6 | MAC address of the IPv6 VXLAN tunnel. This is system configured and should not be updated manually. | | string | -| orchRefs | Correlates this node to a node in another orchestrator. | | list of [OrchRefs](#OrchRef) | -| wireguard | WireGuard configuration for this node. This is applicable only if WireGuard is enabled in [Felix Configuration](felixconfig.mdx). | | [WireGuard](#wireguard) | - -### OrchRef - -| Field | Description | Accepted Values | Schema | Default | -| ------------ | ------------------------------------------------ | --------------- | ------ | ------- | -| nodeName | Name of this node according to the orchestrator. | | string | -| orchestrator | Name of the orchestrator. | k8s | string | - -### BGP - -| Field | Description | Accepted Values | Schema | Default | -| ----------------------- | -------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | ------- | -| asNumber | The AS Number of your `{{nodecontainer}}`. | Optional. 
If omitted the global value is used (see [example modifying Global BGP settings](../../networking/configuring/bgp.mdx) for details about modifying the `asNumber` setting). | integer | -| ipv4Address | The IPv4 address and subnet exported as the next-hop for the {{prodname}} endpoints on the host | The IPv4 address must be specified if BGP is enabled. | string | -| ipv6Address | The IPv6 address and subnet exported as the next-hop for the {{prodname}} endpoints on the host | Optional | string | -| ipv4IPIPTunnelAddr | IPv4 address of the IP-in-IP tunnel. This is system configured and should not be updated manually. | Optional IPv4 address | string | -| routeReflectorClusterID | Enables this node as a route reflector within the given cluster | Optional IPv4 address | string | - -### WireGuard - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | ----------------------------------------------------------------------------------------- | --------------- | ------ | ------- | -| interfaceIPv4Address | The IP address and subnet for the IPv4 WireGuard interface created by Felix on this node. | Optional | string | -| interfaceIPv6Address | The IP address and subnet for the IPv6 WireGuard interface created by Felix on this node. | Optional | string | - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ------------------------------------------------------------------ | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API server | No | Yes | Yes | `{{nodecontainer}}` data is directly tied to the Kubernetes nodes. | diff --git a/calico_versioned_docs/version-3.25/reference/resources/overview.mdx b/calico_versioned_docs/version-3.25/reference/resources/overview.mdx deleted file mode 100644 index 1ec057d3ad..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/overview.mdx +++ /dev/null @@ -1,101 +0,0 @@ ---- -description: Calico resources (APIs) that you can manage using calicoctl. ---- - -# Resource definitions - -This section describes the set of valid resource types that can be managed -through `calicoctl` or `kubectl`. - -While resources may be supplied in YAML or JSON format, this guide provides examples in YAML. - -## Overview of resource structure - -The calicoctl commands for resource management (create, apply, delete, replace, get) -all take resource manifests as input. - -Each manifest may contain a single resource -(e.g. a profile resource), or a list of multiple resources (e.g. a profile and two -hostEndpoint resources). - -The general structure of a single resource is as follows: - -```yaml noValidation -apiVersion: projectcalico.org/v3 -kind: -metadata: - # Identifying information - name: - ... -spec: - # Specification of the resource - ... -``` - -### Schema - -| Field | Description | Accepted Values | Schema | -| ---------- | --------------------------------------------------------------------------------------- | -------------------- | ------------------------ | -| apiVersion | Indicates the version of the API that the data corresponds to. | projectcalico.org/v3 | string | -| kind | Specifies the type of resource described by the YAML document. | | [kind](#supported-kinds) | -| metadata | Contains information used to uniquely identify the particular instance of the resource. | | map | -| spec | Contains the resource specification. 
| | map | - -### Supported kinds - -The following resources are supported: - -- [BGPConfiguration](bgpconfig.mdx) -- [BGPPeer](bgppeer.mdx) -- [FelixConfiguration](felixconfig.mdx) -- [GlobalNetworkPolicy](globalnetworkpolicy.mdx) -- [GlobalNetworkSet](globalnetworkset.mdx) -- [HostEndpoint](hostendpoint.mdx) -- [IPPool](ippool.mdx) -- [NetworkPolicy](networkpolicy.mdx) -- [NetworkSet](networkset.mdx) -- [Node](node.mdx) -- [Profile](profile.mdx) -- [WorkloadEndpoint](workloadendpoint.mdx) - -### Resource name requirements - -Every resource must have the `name` field specified. Name must be unique within a namespace. -The name is required when creating a resource and cannot be updated. -A valid resource name consists of alphanumeric characters with optional `.`, `_`, or `-`, up to 128 characters total. - -### Multiple resources in a single file - -A file may contain multiple resource documents specified in a YAML list format. For example, the following is the contents of a file containing two `HostEndpoint` resources: - -```yaml -- apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: endpoint1 - labels: - type: database - spec: - interfaceName: eth0 - node: host1 - profiles: - - prof1 - - prof2 - expectedIPs: - - 1.2.3.4 - - '00:bb::aa' -- apiVersion: projectcalico.org/v3 - kind: HostEndpoint - metadata: - name: endpoint2 - labels: - type: frontend - spec: - interfaceName: eth1 - node: host1 - profiles: - - prof1 - - prof2 - expectedIPs: - - 1.2.3.5 -``` diff --git a/calico_versioned_docs/version-3.25/reference/resources/profile.mdx b/calico_versioned_docs/version-3.25/reference/resources/profile.mdx deleted file mode 100644 index 0d645fcb3b..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/profile.mdx +++ /dev/null @@ -1,53 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# Profile - -Profiles provide a way to group multiple endpoints so that they inherit a shared set of labels. For historic reasons, Profiles can also include -policy rules, but that feature is deprecated in favor of the much more -flexible [NetworkPolicy](networkpolicy.mdx) and -[GlobalNetworkPolicy](globalnetworkpolicy.mdx) resources. - -Each {{prodname}} endpoint or host endpoint can be assigned to zero or more profiles. - -## Sample YAML - -The following sample profile applies the label `stage: development` to any endpoint that includes `dev-apps` in its list of profiles. - -```yaml -apiVersion: projectcalico.org/v3 -kind: Profile -metadata: - name: dev-apps -spec: - labelsToApply: - stage: development -``` - -## Definition - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| ------ | ---------------------------------- | --------------------------------------------------- | ---------------------------------- | ------- | -| name | The name of the profile. Required. | Alphanumeric string with optional `.`, `_`, or `-`. | string | -| labels | A set of labels for this profile. | | map of string key to string values | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| -------------------- | -------------------------------------------------------------------------------------------------------------- | --------------- | -------------------------------------- | ------- | -| ingress (deprecated) | The ingress rules belonging to this profile. | | List of [Rule](networkpolicy.mdx#rule) | -| egress (deprecated) | The egress rules belonging to this profile. 
| | List of [Rule](networkpolicy.mdx#rule) | -| labelsToApply | An optional set of labels to apply to each endpoint in this profile (in addition to the endpoint's own labels) | | map | - -For `Rule` details please see the [NetworkPolicy](networkpolicy.mdx) or -[GlobalNetworkPolicy](globalnetworkpolicy.mdx) resource. - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | ------------------------------------------------------------------------------ | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API server | No | No | Yes | {{prodname}} profiles are pre-assigned for each Namespace and Service Account. | diff --git a/calico_versioned_docs/version-3.25/reference/resources/workloadendpoint.mdx b/calico_versioned_docs/version-3.25/reference/resources/workloadendpoint.mdx deleted file mode 100644 index fdf8c4649d..0000000000 --- a/calico_versioned_docs/version-3.25/reference/resources/workloadendpoint.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -description: API for this Calico resource. ---- - -# Workload endpoint - -import Ipnat from '@site/calico_versioned_docs/version-3.25/_includes/content/_ipnat.mdx'; - -A workload endpoint resource (`WorkloadEndpoint`) represents an interface -connecting a {{prodname}} networked container or VM to its host. - -Each endpoint may specify a set of labels and list of profiles that {{prodname}} will use -to apply policy to the interface. - -A workload endpoint is a namespaced resource; that means a -[NetworkPolicy](networkpolicy.mdx) -in a specific namespace only applies to the WorkloadEndpoint in that namespace. -Two resources are in the same namespace if the namespace value is set the same -on both. - -:::note - -While `calicoctl` allows the user to fully manage Workload Endpoint resources, -the lifecycle of these resources is generally handled by an orchestrator-specific -plugin such as the {{prodname}} CNI plugin, the {{prodname}} Docker network plugin, -or the {{prodname}} OpenStack Neutron Driver. In general, we recommend that you only -use `calicoctl` to view this resource type. - -::: - -## Sample YAML - -```yaml -apiVersion: projectcalico.org/v3 -kind: WorkloadEndpoint -metadata: - name: node1-k8s-my--nginx--b1337a-eth0 - namespace: default - labels: - app: frontend - projectcalico.org/namespace: default - projectcalico.org/orchestrator: k8s -spec: - node: node1 - orchestrator: k8s - containerID: 1337495556942031415926535 - pod: my-nginx-b1337a - endpoint: eth0 - interfaceName: cali0ef24ba - mac: ca:fe:1d:52:bb:e9 - ipNetworks: - - 192.168.0.0/32 - profiles: - - profile1 - ports: - - name: some-port - port: 1234 - protocol: TCP - - name: another-port - port: 5432 - protocol: UDP -``` - -## Definitions - -### Metadata - -| Field | Description | Accepted Values | Schema | Default | -| --------- | ------------------------------------------------------------------ | -------------------------------------------------- | ------ | --------- | -| name | The name of this workload endpoint resource. Required. | Alphanumeric string with optional `.`, `_`, or `-` | string | | -| namespace | Namespace provides an additional qualification to a resource name. | | string | "default" | -| labels | A set of labels to apply to this endpoint. 
| | map | | - -### Spec - -| Field | Description | Accepted Values | Schema | Default | -| ------------- | ------------------------------------------------------------- | --------------- | ---------------------------------------------- | ------- | -| workload | The name of the workload to which this endpoint belongs. | | string | -| orchestrator | The orchestrator that created this endpoint. | | string | -| node | The node where this endpoint resides. | | string | -| containerID | The CNI CONTAINER_ID of the workload endpoint. | | string | -| pod | Kubernetes pod name for this workload endpoint. | | string | -| endpoint | Container network interface name. | | string | -| ipNetworks | The CIDRs assigned to the interface. | | List of strings | -| ipNATs | List of 1:1 NAT mappings to apply to the endpoint. | | List of [IPNATs](#ipnat) | -| ipv4Gateway | The gateway IPv4 address for traffic from the workload. | | string | -| ipv6Gateway | The gateway IPv6 address for traffic from the workload. | | string | -| profiles | List of profiles assigned to this endpoint. | | List of strings | -| interfaceName | The name of the host-side interface attached to the workload. | | string | -| mac | The source MAC address of traffic generated by the workload. | | IEEE 802 MAC-48, EUI-48, or EUI-64 | -| ports | List of named ports that this workload exposes. | | List of [WorkloadEndpointPorts](#endpointport) | - -### IPNAT - - - -### EndpointPort - -A WorkloadEndpointPort associates a name with a particular TCP/UDP/SCTP port of the endpoint, allowing it to -be referenced as a named port in [policy rules](networkpolicy.mdx#entityrule). - -| Field | Description | Accepted Values | Schema | Default | -| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | ------ | ------- | -| name | The name to attach to this port, allowing it to be referred to in [policy rules](networkpolicy.mdx#entityrule). Names must be unique within an endpoint. | | string | | -| protocol | The protocol of this named port. | `TCP`, `UDP`, `SCTP` | string | | -| port | The workload port number. | `1`-`65535` | int | | -| hostPort | Port on the host that is forwarded to this port. | `1`-`65535` | int | | -| hostIP | IP address on the host on which the hostPort is accessible. | Valid IPv4 or IPv6 address | string | | - -:::note - -On their own, WorkloadEndpointPort entries don't result in any change to the connectivity of the port. -They only have an effect if they are referred to in policy. - -::: - -:::note - -The hostPort and hostIP fields are read-only and determined from Kubernetes hostPort configuration. -These fields are used only when host ports are enabled in Calico. - -::: - -## Supported operations - -| Datastore type | Create/Delete | Update | Get/List | Notes | -| --------------------- | ------------- | ------ | -------- | -------------------------------------------------------- | -| etcdv3 | Yes | Yes | Yes | -| Kubernetes API server | No | Yes | Yes | WorkloadEndpoints are directly tied to a Kubernetes pod. 
|
diff --git a/calico_versioned_docs/version-3.25/reference/rest-api-reference.mdx b/calico_versioned_docs/version-3.25/reference/rest-api-reference.mdx
deleted file mode 100644
index 13b5395dee..0000000000
--- a/calico_versioned_docs/version-3.25/reference/rest-api-reference.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
----
-description: REST API reference
----
-
-# REST API Reference
-
-import SwaggerUI from 'swagger-ui-react';
-
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/typha/configuration.mdx b/calico_versioned_docs/version-3.25/reference/typha/configuration.mdx
deleted file mode 100644
index 518df36c6f..0000000000
--- a/calico_versioned_docs/version-3.25/reference/typha/configuration.mdx
+++ /dev/null
@@ -1,99 +0,0 @@
----
-description: Configure Typha for scaling the Kubernetes API datastore (kdd).
----
-
-# Configuring Typha
-
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
-
-
-Typha configuration cannot be modified when Calico is installed via the operator.
-
-
-
-Configuration for Typha is read from one of two possible locations, in
-order, as follows.
-
-1. Environment variables, prefixed with `TYPHA_`.
-
-2. The Typha configuration file. The path to this file defaults to
-   `/etc/calico/typha.cfg` but can be overridden using the `-c` or
-   `--config-file` options on the command line.
-
-The value of any configuration parameter is the value read from the
-_first_ location containing a value. For example, if an environment variable
-contains a value, it takes precedence over the configuration file.
-
-If not set in any of these locations, most configuration parameters have
-defaults, and it should be rare to have to set them explicitly.
-
-The full list of parameters that can be set is as follows.
-
-### General configuration
-
-| Configuration parameter | Environment variable | Description | Schema |
-| --- | --- | --- | --- |
-| `DatastoreType` | `TYPHA_DATASTORETYPE` | The datastore that Typha should read endpoints and policy information from. [Default: `etcdv3`] | `etcdv3`, `kubernetes` |
-| `HealthEnabled` | `TYPHA_HEALTHENABLED` | When enabled, exposes Typha health information via an HTTP endpoint. | boolean |
-| `HealthPort` | `TYPHA_HEALTHPORT` | The port that Typha will serve health information over. [Default: `9098`] | int |
-| `HealthHost` | `TYPHA_HEALTHHOST` | The address that Typha will bind its health endpoint to. [Default: `localhost`] | string |
-| `LogFilePath` | `TYPHA_LOGFILEPATH` | The full path to the Typha log. Set to `none` to disable file logging. [Default: `/var/log/calico/typha.log`] | string |
-| `LogSeverityFile` | `TYPHA_LOGSEVERITYFILE` | The log severity above which logs are sent to the log file. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` |
-| `LogSeverityScreen` | `TYPHA_LOGSEVERITYSCREEN` | The log severity above which logs are sent to stdout. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` |
-| `LogSeveritySys` | `TYPHA_LOGSEVERITYSYS` | The log severity above which logs are sent to syslog. Set to `""` to disable logging to syslog. [Default: `Info`] | `Debug`, `Info`, `Warning`, `Error`, `Fatal` |
-| `PrometheusGoMetricsEnabled` | `TYPHA_PROMETHEUSGOMETRICSENABLED` | Set to `false` to disable Go runtime metrics collection, which the Prometheus client enables by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean |
-| `PrometheusMetricsEnabled` | `TYPHA_PROMETHEUSMETRICSENABLED` | Set to `true` to enable the Prometheus metrics server in Typha. [Default: `false`] | boolean |
-| `PrometheusMetricsHost` | `TYPHA_PROMETHEUSMETRICSHOST` | TCP network address that the Prometheus metrics server should bind to. [Default: `""`] | string |
-| `PrometheusMetricsPort` | `TYPHA_PROMETHEUSMETRICSPORT` | TCP port that the Prometheus metrics server should bind to. [Default: `9091`] | int |
-| `PrometheusProcessMetricsEnabled` | `TYPHA_PROMETHEUSPROCESSMETRICSENABLED` | Set to `false` to disable process metrics collection, which the Prometheus client enables by default. This reduces the number of metrics reported, reducing Prometheus load. [Default: `true`] | boolean |
-| `ShutdownTimeoutSecs` | `TYPHA_SHUTDOWNTIMEOUTSECS` | Maximum time that Typha should take to do a graceful shutdown. In Kubernetes, this should match Typha's `terminationGracePeriodSeconds`. | int |
-| `ShutdownConnectionDropIntervalMaxSecs` | `TYPHA_SHUTDOWNCONNECTIONDROPINTERVALMAXSECS` | Maximum time between terminating two connections when doing a graceful shutdown. Prevents very slow shutdowns if `ShutdownTimeoutSecs` is large but Typha only has a small number of clients. | int |
-
-:::note
-
-By default, if the health endpoint is enabled, Typha listens on localhost. However, if Typha is used in
-Kubernetes, the kubelet will do health checks using the pod IP. To work around this discrepancy, the Typha image
-supports a health-check CLI command that fetches the health endpoint:
-`calico-typha check (readiness|liveness) --port=<port>`. If you modify the health port, you will need to add the
-`--port=<port>` argument to the liveness and readiness probe commands in the manifest.
-
-:::
-
-### etcd datastore configuration
-
-| Configuration parameter | Environment variable | Description | Schema |
-| --- | --- | --- | --- |
-| `EtcdCaFile` | `TYPHA_ETCDCAFILE` | Path to the file containing the root certificate of the certificate authority (CA) that issued the etcd server certificate. Configures Typha to trust the CA that signed the root certificate. The file may contain multiple root certificates, causing Typha to trust each of the CAs included. To disable authentication of the server by Typha, set the value to `none`. [Default: `/etc/ssl/certs/ca-certificates.crt`] | string |
-| `EtcdCertFile` | `TYPHA_ETCDCERTFILE` | Path to the file containing the client certificate issued to Typha. Enables Typha to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/typha/cert.pem` (optional) | string |
-| `EtcdEndpoints` | `TYPHA_ETCDENDPOINTS` | Comma-delimited list of etcd endpoints to connect to. Example: `http://127.0.0.1:2379,http://127.0.0.2:2379`. | `<scheme>://<ip-or-fqdn>:<port>` |
-| `EtcdKeyFile` | `TYPHA_ETCDKEYFILE` | Path to the file containing the private key matching the Typha client certificate. Enables Typha to participate in mutual TLS authentication and identify itself to the etcd server. Example: `/etc/typha/key.pem` (optional) | string |
-
-### Kubernetes API datastore configuration
-
-The Kubernetes API datastore driver reads its configuration from Kubernetes-provided environment variables.
-
-#### Environment variables
-
-| Environment | Description | Schema |
-| --- | --- | --- |
-| USE_POD_CIDR | Use the Kubernetes `Node.Spec.PodCIDR` field. This field is required when using the Kubernetes API datastore with host-local IPAM. [Default: false] | boolean |
-
-### Felix-Typha TLS configuration
-
-| Configuration parameter | Environment variable | Description | Schema |
-| --- | --- | --- | --- |
-| `CAFile` | `TYPHA_CAFILE` | Path to the file containing the root certificate of the CA that issued the Felix client certificate. Configures Typha to trust the CA that signed the Felix client certificate. The file may contain multiple root certificates, causing Typha to trust each of the CAs included. Example: `/etc/typha/ca.pem` | string |
-| `ClientCN` | `TYPHA_CLIENTCN` | If set, the `Common Name` that Felix's certificate must have. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `ClientURISAN`. You can also set both, for example to facilitate a migration from one to the other; if either matches, the communication succeeds. [Default: none] | string |
-| `ClientURISAN` | `TYPHA_CLIENTURISAN` | If set, a URI SAN that Felix's certificate must have. We recommend populating this with a [SPIFFE](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#2-spiffe-identity) string that identifies Felix. All Felix instances should use the same SPIFFE ID. If you have enabled TLS on the communications from Felix to Typha, you must set a value here or in `ClientCN`. You can also set both, for example to facilitate a migration from one to the other; if either matches, the communication succeeds. [Default: none] | string |
-| `ServerCertFile` | `TYPHA_SERVERCERTFILE` | Path to the file containing the server certificate issued to Typha. Typha presents this to Felix clients during the TLS handshake. Example: `/etc/typha/cert.pem` | string |
-| `ServerKeyFile` | `TYPHA_SERVERKEYFILE` | Path to the file containing the private key matching the Typha server certificate. Example: `/etc/typha/key.pem` (optional) | string |
-
-For more information on how to use and set these variables, refer to
-[Connections from Felix to Typha (Kubernetes)](../../network-policy/comms/crypto-auth.mdx#connections-from-felix-to-typha-kubernetes).
-
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/typha/index.mdx b/calico_versioned_docs/version-3.25/reference/typha/index.mdx
deleted file mode 100644
index caea3ab03c..0000000000
--- a/calico_versioned_docs/version-3.25/reference/typha/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Typha is the component for scaling Calico in large Kubernetes deployments.
-hide_table_of_contents: true
----
-
-# Typha
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/typha/overview.mdx b/calico_versioned_docs/version-3.25/reference/typha/overview.mdx
deleted file mode 100644
index dc5a068e28..0000000000
--- a/calico_versioned_docs/version-3.25/reference/typha/overview.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
----
-description: Use the Calico Typha daemon to increase scale and reduce impact on the datastore.
---
-
-# Typha overview
-
-The Typha daemon sits between the datastore (such as the Kubernetes API server) and many instances of Felix. Typha's main purpose is to increase scale by reducing each node's impact on the datastore. Services such as [Felix](https://github.com/projectcalico/felix) and [confd](https://github.com/projectcalico/confd) connect to Typha instead of connecting directly to the datastore, because Typha maintains a single datastore connection on behalf of all its clients. It caches the datastore state and deduplicates events so that they can be fanned out to many listeners.
-
-:::note
-
-If you are using the Kubernetes API Datastore, we recommend using Typha. Although Typha can be used with etcd, etcd v3 is already optimized to handle many clients, so adding Typha there is redundant and not recommended. Operator installations always install Typha.
-
-:::
-
-## Advantages
-
-- Since one Typha instance can support hundreds of Felix instances, it reduces the load on the datastore by a large factor.
-- Since Typha can filter out updates that are not relevant to Felix, it also reduces Felix's CPU usage. In a high-scale (100+ node) Kubernetes cluster, this is essential because the number of updates generated by the API server scales with the number of nodes.
diff --git a/calico_versioned_docs/version-3.25/reference/typha/prometheus.mdx b/calico_versioned_docs/version-3.25/reference/typha/prometheus.mdx
deleted file mode 100644
index 846af9f455..0000000000
--- a/calico_versioned_docs/version-3.25/reference/typha/prometheus.mdx
+++ /dev/null
@@ -1,120 +0,0 @@
----
-description: Review metrics for the Typha component if you are using Prometheus.
----
-
-# Prometheus metrics
-
-Typha can be configured to report a number of metrics through Prometheus. See the
-[configuration reference](configuration.mdx) for how to enable metrics reporting; a short sketch follows below.
-
-## Metric reference
-
-#### Typha specific
-
-Typha exports a number of Prometheus metrics. The current set is as follows. Since some metrics
-are tied to particular implementation choices inside Typha, we can't make any hard guarantees that
-metrics will persist across releases. However, we aim not to make any spurious changes to
-existing metrics.
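-
-For a manifest-based installation, a minimal sketch of enabling the metrics server uses the environment variables documented in the configuration reference (the Deployment and container names here are illustrative; match them to your manifest):
-
-```yaml
-# Sketch: enable Typha's Prometheus metrics server via environment variables.
-# Only the env entries are load-bearing; the surrounding object names are
-# illustrative and must match your actual manifest.
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: calico-typha
-  namespace: kube-system
-spec:
-  selector:
-    matchLabels:
-      k8s-app: calico-typha
-  template:
-    metadata:
-      labels:
-        k8s-app: calico-typha
-    spec:
-      containers:
-        - name: calico-typha
-          env:
-            - name: TYPHA_PROMETHEUSMETRICSENABLED
-              value: 'true'
-            - name: TYPHA_PROMETHEUSMETRICSPORT
-              value: '9091'
-```
-
-With this in place, the metrics below are served on port 9091 (see the `curl` example later on this page).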
-
-##### Terminology
-
-**Syncer:** Many of Typha's metrics are now parameterised by "syncer type"; Typha runs one "syncer" for each
-type of client that it supports. The "syncer" is the component that synchronises Typha's local cache
-of the datastore with the upstream datastore. The syncer type is attached to the metrics via a
-Prometheus label `syncer="..."`.
-
-**Breadcrumb:** Typha's internal cache stores a series of snapshots of the state of the datastore along with
-a list of changes when compared to the previous snapshot. We call the combination of a snapshot and the list of
-changes a "breadcrumb". Breadcrumbs are linked together into a linked list as they are created. When a client
-connects, Typha sends the snapshot from the most recent breadcrumb to the client; then, it "follows the breadcrumbs"
-on behalf of that client, sending it the change list from each breadcrumb.
-
-| Name | Description |
-| --- | --- |
-| `typha_cache_size` | The total number of key/value pairs in Typha's in-memory cache. |
-| `typha_snapshots_generated` | The total number of binary snapshots generated by Typha. Binary snapshots are generated once and then shared between multiple clients for performance. |
-| `typha_snapshots_reused` | The number of binary snapshots that Typha was able to reuse for multiple clients, thus reducing CPU usage. |
-| `typha_snapshot_raw_bytes` | The size of the most recent binary snapshot in bytes, pre-compression. |
-| `typha_snapshot_compressed_bytes` | The size of the most recent binary snapshot in bytes, post-compression. |
-| `typha_breadcrumb_block` | Count of the number of times Typha got the next Breadcrumb after blocking. |
-| `typha_breadcrumb_non_block` | Count of the number of times Typha got the next Breadcrumb without blocking. |
-| `typha_breadcrumb_seq_number` | Current (server-local) sequence number; number of snapshot deltas processed. |
-| `typha_breadcrumb_size` | Number of KVs recorded in each breadcrumb. |
-| `typha_client_latency_secs` | Per-client latency, i.e. how far behind the current state each client is. |
-| `typha_client_snapshot_send_secs` | How long it took to send the initial snapshot to each client. |
-| `typha_client_write_latency_secs` | Per-client write latency, i.e. how long each write call takes. |
-| `typha_connections_accepted` | Total number of connections accepted over time. |
-| `typha_connections_active` | Number of open client connections (including connections that have not completed the handshake). |
-| `typha_connections_streaming` | Number of client connections that are actively streaming (i.e. connections that successfully completed the handshake). |
-| `typha_connections_dropped` | Total number of connections dropped due to rebalancing. |
-| `typha_kvs_per_msg` | Number of KV pairs sent in each message. |
-| `typha_log_errors` | Number of errors encountered while logging. |
-| `typha_logs_dropped` | Number of logs dropped because the output stream was blocked. |
-| `typha_next_breadcrumb_latency_secs` | Time to retrieve the next breadcrumb when already behind. |
-| `typha_ping_latency` | Round-trip ping/pong latency to each client. Typha's protocol includes a regular ping/pong keepalive to verify that the connection is still up. |
-| `typha_updates_skipped` | Total number of updates skipped because the datastore change was not relevant. (For example, an update to a Kubernetes Pod field that {{prodname}} does not read.) |
-| `typha_updates_total` | Total number of updates received from the datastore. |
-| `remote_cluster_connection_status` | Status of the remote cluster connection in federation. Represented as numeric values 0 (NotConnecting), 1 (Connecting), 2 (InSync), 3 (ReSyncInProgress), 4 (ConfigChangeRestartRequired), 5 (ConfigInComplete). Uses the `remote_cluster_name` label to identify the remote cluster in the federation. |
-
-Prometheus metrics are self-documenting. With metrics turned on, `curl` can be used to list the
-metrics along with their help text and type information.
-
-```bash
-curl -s http://localhost:9091/metrics | head
-```
-
-Example response:
-
-```
-# HELP typha_breadcrumb_block Count of the number of times Typha got the next Breadcrumb after blocking.
-# TYPE typha_breadcrumb_block counter
-typha_breadcrumb_block 57
-# HELP typha_breadcrumb_non_block Count of the number of times Typha got the next Breadcrumb without blocking.
-# TYPE typha_breadcrumb_non_block counter
-typha_breadcrumb_non_block 0
-# HELP typha_breadcrumb_seq_number Current (server-local) sequence number; number of snapshot deltas processed.
-# TYPE typha_breadcrumb_seq_number gauge
-typha_breadcrumb_seq_number 22215
-...
-```
-
-#### CPU / memory metrics
-
-Typha also exports the default set of metrics that the Prometheus client library makes available. Currently, those
-include:
-
-| Name | Description |
-| --- | --- |
-| `go_gc_duration_seconds` | A summary of the GC invocation durations. |
-| `go_goroutines` | Number of goroutines that currently exist. |
-| `go_memstats_alloc_bytes` | Number of bytes allocated and still in use. |
-| `go_memstats_alloc_bytes_total` | Total number of bytes allocated, even if freed. |
-| `go_memstats_buck_hash_sys_bytes` | Number of bytes used by the profiling bucket hash table. |
-| `go_memstats_frees_total` | Total number of frees. |
-| `go_memstats_gc_sys_bytes` | Number of bytes used for garbage collection system metadata. |
-| `go_memstats_heap_alloc_bytes` | Number of heap bytes allocated and still in use. |
-| `go_memstats_heap_idle_bytes` | Number of heap bytes waiting to be used. |
-| `go_memstats_heap_inuse_bytes` | Number of heap bytes that are in use. |
-| `go_memstats_heap_objects` | Number of allocated objects. |
-| `go_memstats_heap_released_bytes_total` | Total number of heap bytes released to the OS. |
-| `go_memstats_heap_sys_bytes` | Number of heap bytes obtained from the system. |
-| `go_memstats_last_gc_time_seconds` | Number of seconds since 1970 of the last garbage collection. |
-| `go_memstats_lookups_total` | Total number of pointer lookups. |
-| `go_memstats_mallocs_total` | Total number of mallocs. |
-| `go_memstats_mcache_inuse_bytes` | Number of bytes in use by mcache structures. |
-| `go_memstats_mcache_sys_bytes` | Number of bytes used for mcache structures obtained from the system. |
-| `go_memstats_mspan_inuse_bytes` | Number of bytes in use by mspan structures. |
-| `go_memstats_mspan_sys_bytes` | Number of bytes used for mspan structures obtained from the system. |
-| `go_memstats_next_gc_bytes` | Number of heap bytes when the next garbage collection will take place. |
-| `go_memstats_other_sys_bytes` | Number of bytes used for other system allocations. |
-| `go_memstats_stack_inuse_bytes` | Number of bytes in use by the stack allocator. |
-| `go_memstats_stack_sys_bytes` | Number of bytes obtained from the system for the stack allocator. |
-| `go_memstats_sys_bytes` | Number of bytes obtained from the system. Sum of all system allocations. |
-| `process_cpu_seconds_total` | Total user and system CPU time spent, in seconds. |
-| `process_max_fds` | Maximum number of open file descriptors. |
-| `process_open_fds` | Number of open file descriptors. |
-| `process_resident_memory_bytes` | Resident memory size in bytes. |
-| `process_start_time_seconds` | Start time of the process since the Unix epoch, in seconds. |
-| `process_virtual_memory_bytes` | Virtual memory size in bytes. |
-| `promhttp_metric_handler_requests_in_flight` | Current number of scrapes being served. |
-| `promhttp_metric_handler_requests_total` | Total number of scrapes by HTTP status code. |
diff --git a/calico_versioned_docs/version-3.25/reference/vpp/host-network.mdx b/calico_versioned_docs/version-3.25/reference/vpp/host-network.mdx
deleted file mode 100644
index 59a8e7d5c1..0000000000
--- a/calico_versioned_docs/version-3.25/reference/vpp/host-network.mdx
+++ /dev/null
@@ -1,103 +0,0 @@
----
-description: Description of the host network configuration performed by VPP.
----
-
-# Host network configuration
-
-## Big picture
-
-The VPP-host connection is particular in that the address of the primary interface is shared by both VPP and Linux.
-Let's say the primary interface (the uplink interface on the diagram below) is called `enp216s0f1` and is configured with a `192.168.0.1/24` address.
-
-This address must also be the one the API server listens on, as it is the first address of the uplink (a.k.a. the main interface for the host).
-
-![Network architecture](/img/calico/vpp-host-net.svg)
-
-## When VPP starts
-
-- It grabs the primary interface with the chosen driver, either placing it in a dedicated network namespace or removing it entirely as a Linux netdev, depending on the driver. In any case, the interface disappears from the host's root network namespace.
-- It configures the interface in VPP with the same configuration (addresses, routes) it had in Linux.
-
-So issuing `show int addr` in VPP will give something like
-
-```
-vpp sh int addr
-avf-0/d8/a/0 (up):
-  L3 192.168.0.1/24
-```
-
-- It creates a `tap` interface between VPP and the host.
-- This tap interface is given the same name and MAC address as the original interface in the host's root network namespace.
-- This interface is also reconfigured with all the addresses and routes that the host had configured on the original interface.
-
-```bash
-ip addr show enp216s0f1
-3: enp216s0f1: mtu 1500 qdisc mq state UP group default qlen 1000
-    link/ether 52:54:00:40:46:8e brd ff:ff:ff:ff:ff:ff
-    inet 192.168.0.1/24 brd 192.168.0.255 scope global enp216s0f1
-       valid_lft forever preferred_lft forever
-
-ip route
-default via 192.168.0.254 dev enp216s0f1 proto dhcp src 192.168.0.1 metric 100
-${SERVICE_CIDR} via 192.168.0.254 dev enp216s0f1 proto static mtu 1440
-${POD_CIDR} via 192.168.0.254 dev enp216s0f1 proto static mtu 1440
-192.168.0.0/24 dev enp216s0f1 proto kernel scope link src 192.168.0.1
-```
-
-The new `tap` interface (named `enp216s0f1` in this example) is also configured with routes towards the Kubernetes service CIDR and the pod CIDR, so that the Linux host can reach the workloads through VPP. These routes use a reduced MTU to account for the encapsulation overhead.
-
-- In VPP you will find it under the name `tap0`.
-  - It is configured unnumbered, as a child of the primary interface `avf-0/d8/a/0`:
-
-```
-vpp sh int addr
-tap0 (up):
-  unnumbered, use avf-0/d8/a/0
-  L3 192.168.0.1/24
-```
-
-- It is also registered as the default punt path.
-  - This means that all the traffic that would be dropped by VPP (which includes the traffic to the VPP address that is not handled by VPP itself, but not the tunnel traffic, which is decapsulated / decrypted by VPP) will be passed to this interface, so the Linux host will receive and process it.
-
-```
-vpp sh ip punt redirect
- rx local0 via:
-   path-list:[31] locks:1 flags:no-uRPF, uPRF-list:24 len:1 itfs:[2, ]
-     path:[41] pl-index:31 ip4 weight=1 pref=0 attached-nexthop:  oper-flags:resolved,
-       169.254.0.1 tap0
-     [@0]: ipv4 via 169.254.0.1 tap0: mtu:1500 next:6 flags:[features ] 52540040468e02ca11c0fd100800
-  forwarding
-   [@1]: ipv4 via 169.254.0.1 tap0: mtu:1500 next:6 flags:[features ] 52540040468e02ca11c0fd100800
-```
-
-For the v6 configuration, use
-
-```
-vpp sh ip6 punt redirect
-```
-
-## Packet flow - incoming packet on the uplink
-
-A packet destined for the host arrives on the uplink; let's say it has `src=192.168.0.2,dst=192.168.0.1`.
-
-- VPP receives it and sees that the destination address is the one configured on its interface.
-- As it doesn't have specific handling configured for this packet, it looks up the punt path and sends it into `tap0`.
-- Linux receives it on the tap interface, which is configured with `192.168.0.1/32`, and so processes it normally.
-
-The reply is now emitted by the host with `src=192.168.0.1,dst=192.168.0.2`.
-
-- Linux looks up the route for `192.168.0.0/24` and forwards it into the tap interface.
-- VPP receives it on the interface `tap0`.
-- It looks up `192.168.0.2` in the FIB and finds a route on the uplink interface (configured with `192.168.0.1/24`).
-- The packet is sent on the uplink.
-
-## Packet flow - pod talking to the api-server
-
-Let's say the api-server lives on `Node A`.
-
-If a pod on `Node B` wants to talk to the api-server on `Node A`, the packet flow will be the same as described above.
-
-- In `Node B`, standard routing happens (`sh ip fib` will give details on this node).
-- When reaching `Node A`, we're in the same situation as previously described.
-
-If a pod on `Node A` wants to talk to the api-server on `Node A`, let's say the packet is `src=10.0.0.1,dst=192.168.0.1` with `10.0.0.1` being the pod address. Then:
-
-- Things happen exactly the same way as if the packet had come from the uplink.
-- This time packets come into VPP through the `tunN` interface corresponding to the pod, and are then punted to `tap0` towards the host.
-- The return traffic from the host is received by VPP on `tap0`, and is routed directly to the pod on `tunN`.
diff --git a/calico_versioned_docs/version-3.25/reference/vpp/index.mdx b/calico_versioned_docs/version-3.25/reference/vpp/index.mdx
deleted file mode 100644
index ed0beb9331..0000000000
--- a/calico_versioned_docs/version-3.25/reference/vpp/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-description: Configuration settings and architecture of the VPP dataplane.
-hide_table_of_contents: true
----
-
-# VPP dataplane
-
-import DocCardList from '@theme/DocCardList';
-import { useCurrentSidebarCategory } from '@docusaurus/theme-common';
-
-
diff --git a/calico_versioned_docs/version-3.25/reference/vpp/technical-details.mdx b/calico_versioned_docs/version-3.25/reference/vpp/technical-details.mdx
deleted file mode 100644
index 19553fe8fb..0000000000
--- a/calico_versioned_docs/version-3.25/reference/vpp/technical-details.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-description: Technical details on the VPP dataplane integration.
----
-
-# VPP dataplane implementation details
-
-## Software architecture
-
-The VPP dataplane integration is split into two components: `vpp-manager`, which handles the VPP startup configuration and lifecycle, and `calico-vpp-agent`, which is responsible for all the runtime configuration of VPP for {{prodname}}. Both processes run in separate containers in the calico-vpp-node pod, which runs in the host's root network namespace (a skeleton manifest is sketched further down this page).
-
-![Implementation architecture](/img/calico/vpp-soft-arch.svg)
-
-### vpp-manager
-
-VPP Manager is a very lightweight process responsible for bootstrapping VPP, including the uplink interface addressing and routing configuration. It also restores the Linux configuration on shutdown. The code can be found in this directory: [https://github.com/projectcalico/vpp-dataplane/tree/{{vppbranch}}/vpp-manager](https://github.com/projectcalico/vpp-dataplane/tree/{{vppbranch}}/vpp-manager).
-
-On startup, vpp-manager starts by determining the desired configuration for the VPP uplink by checking its configuration in Linux, including addresses and routes. It then renders an appropriate configuration file for VPP, and starts the VPP process.
-
-Once VPP is running, vpp-manager connects to it using the API and configures the uplink interface as it was configured in Linux. Once this is done, it configures a tap interface in the host to restore the host's connectivity.
-
-From then on, vpp-manager forwards all received Unix signals to VPP so that VPP can stop gracefully.
-
-When VPP stops, either in reaction to a received signal or in case of a crash, vpp-manager restores the configuration of the Linux interface so that the host recovers its connectivity to the outside through the original uplink interface.
-
-vpp-manager is deliberately kept as simple as possible, to minimize the risk of bugs, as these could leave the host without connectivity, requiring a reboot.
-
-### calico-vpp-agent
-
-The {{prodname}} VPP agent is the process responsible for all the {{prodname}}-specific configuration in VPP. Its code lives in this directory: [https://github.com/projectcalico/vpp-dataplane/tree/{{vppbranch}}/calico-vpp-agent](https://github.com/projectcalico/vpp-dataplane/tree/{{vppbranch}}/calico-vpp-agent).
-
-This agent is split into four main components, which interact with the Kubernetes and {{prodname}} APIs to configure VPP. These components are the routing manager, the CNI server, the services manager and the policies manager.
-
-**Routing manager**
-
-The {{prodname}} VPP agent embeds a GoBGP daemon, and dynamically updates its configuration (including peers, ASN, etc.) according to the {{prodname}} configuration. As the calico-vpp-node pod runs in the host network namespace, the BGP daemon uses the host's TCP stack, and sends and receives traffic through the host's interface `vpptap0`.
-
-When routes are added or removed in BGP, the routing manager reflects the changes in VPP. The routes are installed differently depending on the {{prodname}} configuration. If the configuration requires the use of an IPIP or VXLAN tunnel, then the tunnel interface is created on demand in VPP, and the route is added through the tunnel. Otherwise, the route is simply added as-is in VPP.
-
-**CNI server**
-
-This component implements a server that receives gRPC requests from the {{prodname}} CNI (configured with a gRPC dataplane) through a Unix socket mounted on the host.
-
-When it receives an ADD request, the CNI server creates a tun interface in the container's namespace, and configures it with the IP address and routes chosen by {{prodname}}. The routes' next hop is an otherwise unused link-local address, both in IPv4 and in IPv6. A /32 or /128 route is added in VPP as well for the container address through the tun interface. When it receives a DEL request, the CNI server cleans up the tun interface from VPP and from the container's namespace.
-
-**Services manager**
-
-This component is the equivalent of kube-proxy for VPP, i.e. it configures NAT load-balancing rules to implement Kubernetes services in VPP. It watches the Kubernetes Services and Endpoints APIs, and updates the VPP configuration on each change. Service load balancing is implemented with a {{prodname}}-specific DNAT plugin in VPP.
-
-**Policies manager**
-
-This component implements {{prodname}} policies in VPP. Felix ({{prodname}}'s policy agent) is configured to use a lightweight proxy as its dataplane. This proxy relays all the configuration messages sent by Felix to the `calico-vpp-agent`, and relays status updates the other way. The VPP agent then uses a custom plugin in VPP to implement policies.
-
-## Network architecture
-
-### Primary interface configuration
-
-To send and receive packets on behalf of the containers, VPP needs to use one of the host's network interfaces. There are various ways to do so, which differ in performance and configuration complexity:
-
-- AF_PACKET: the slowest option, but also the most universally supported one, as it works for every Linux network device. The interface is placed in a dedicated network namespace in order not to disrupt the host connectivity that is set up by VPP.
-- AF_XDP: much more performant than AF_PACKET, but it requires a recent kernel version (5.4+). The interface is placed in a dedicated network namespace in order not to disrupt the host connectivity that is set up by VPP.
-- DPDK: VPP can use DPDK to drive interfaces. This is more performant than AF_XDP. DPDK supports a large number of interfaces, but requires hugepages to be configured on the host to work. The interface is bound to a specific PCI driver on startup, and thus disappears from the host's kernel network devices.
-- VPP native drivers: the most performant option, but a limited number of interfaces are supported. Supported interfaces include Intel AVF, Mellanox Connect-X series, vmxnet3 (VMware) interfaces, and virtio (used by Qemu/KVM and GCE) interfaces. As with DPDK, the interface needs to be bound to a specific PCI driver on startup, and thus disappears from the host. Using native drivers requires custom configuration, except for `virtio` and Intel AVF interfaces, which are supported by `vpp-manager`.
-
-### Host network configuration
-
-See the [dedicated page](host-network.mdx).
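-
-To make the two-container split described under **Software architecture** concrete, here is a skeleton of the per-node pod. This is a sketch only: the image references and the agent container name are assumptions for illustration, so refer to the manifest shipped with the vpp-dataplane repository for the real values.
-
-```yaml
-# Sketch: skeleton of the calico-vpp-node DaemonSet described on this page.
-# Image names and the agent container name are illustrative assumptions.
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
-  name: calico-vpp-node
-  namespace: calico-vpp-dataplane
-spec:
-  selector:
-    matchLabels:
-      k8s-app: calico-vpp-node
-  template:
-    metadata:
-      labels:
-        k8s-app: calico-vpp-node
-    spec:
-      hostNetwork: true # both containers run in the host's root network namespace
-      containers:
-        - name: vpp # vpp-manager: renders the VPP config, boots and supervises VPP
-          image: docker.io/calicovpp/vpp
-        - name: agent # calico-vpp-agent: routing, CNI server, services and policies managers
-          image: docker.io/calicovpp/agent
-```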
-
-### Container interfaces
-
-When a Pod is scheduled on the host, the kubelet service will create the network namespace for the new pod, and then use the CNI to request that {{prodname}} configures an interface in this namespace. {{prodname}} will first compute the IP configuration for the pod (address and routes), and then pass that to the VPP agent. VPP will then create a tun interface in the desired namespace for the container, and configure it with the required address and routes. This makes all the container traffic flow through VPP.
-
-### Container routing & services load balancing
-
-To determine the host where each pod is running, {{prodname}} uses BGP. Routes learned from BGP are installed in VPP to reach the containers that are running on other nodes. Depending on the {{prodname}} configuration, these routes are either to directly connected hosts, or through tunnel interfaces if encapsulation is required.
-
-Services load balancing is implemented with NAT rules in VPP, much like what kube-proxy does. The source address is preserved when possible for external connections.
-
-Here is the resulting logical network topology:
-
-![Network architecture](/img/calico/vpp-net-arch.svg)
diff --git a/calico_versioned_docs/version-3.25/reference/vpp/uplink-configuration.mdx b/calico_versioned_docs/version-3.25/reference/vpp/uplink-configuration.mdx
deleted file mode 100644
index 0f98bc0734..0000000000
--- a/calico_versioned_docs/version-3.25/reference/vpp/uplink-configuration.mdx
+++ /dev/null
@@ -1,174 +0,0 @@
----
-description: Configuration parameters for the primary interface in VPP.
----
-
-# Primary interface configuration
-
-You can choose among different ways to consume the host's primary interface with VPP, usually with a tradeoff between performance and simplicity of configuration. Here are the main supported configurations:
-
-- `virtio`: the interface is consumed with a native VPP driver. Performance is good and setup is simple, but only virtio interfaces are supported.
-- `avf`: we create a virtual function and consume it with a native VPP driver. Performance is good and setup is simple, but only Intel AVF interfaces are supported.
-- `af_packet`: the interface stays in Linux, which passes packets to VPP. Performance is low, but it works out of the box with any interface.
-- `af_xdp`: packets are passed via eBPF. This requires a `>=5.4` kernel, but works out of the box with good performance.
-- `dpdk`: the interface is removed from Linux and consumed with the DPDK library. Performance and support are good, but setup can be complex.
-- Other native VPP drivers bring better performance than `dpdk` but require complex manual setup.
-
-## General mechanics
-
-The `calico-vpp-config` ConfigMap section of the manifest file contains the key `CALICOVPP_INTERFACES`, which is a dictionary with parameters
-specific to interfaces in Calico VPP:
-
-```yaml
-  # Configures parameters for calicovpp agent and vpp manager
-  CALICOVPP_INTERFACES: |-
-    {
-      "maxPodIfSpec": {
-        "rx": 10, "tx": 10, "rxqsz": 1024, "txqsz": 1024
-      },
-      "defaultPodIfSpec": {
-        "rx": 1, "tx": 1, "isl3": true
-      },
-      "vppHostTapSpec": {
-        "rx": 1, "tx": 1, "rxqsz": 1024, "txqsz": 1024, "isl3": false
-      },
-      "uplinkInterfaces": [
-        {
-          "interfaceName": "eth1",
-          "vppDriver": "af_packet"
-        }
-      ]
-    }
-```
-
-The field `uplinkInterfaces` contains a list of interfaces and their configuration, with the first element being the primary/main interface, and the rest (if any) being the secondary host interfaces. The way the primary interface gets configured is controlled by the `vppDriver` field in `uplinkInterfaces[0]`.
-Leaving the `vppDriver` field empty (or unspecified) tries all the drivers supported in your setup (except `dpdk`, which needs to be specified explicitly), starting with the most performant. You'll still need to allocate hugepages if you want, for example, virtio to work.
-
-:::note
-
-The legacy `CALICOVPP_NATIVE_DRIVER` way of specifying the driver to use is still supported. Refer to the **Legacy options** sub-section of [Getting Started](../../getting-started/kubernetes/vpp/getting-started.mdx).
-
-:::
-
-## Using the native Virtio driver
-
-You can use this driver if your primary interface is virtio (`realpath /sys/bus/pci/devices/<pci-id>/driver` gives `.../virtio-net`):
-
-- Ensure you have hugepages available on your system (`sysctl -w vm.nr_hugepages=512`).
-- Ensure `vfio-pci` is loaded (`sudo modprobe vfio-pci`).
-- For the primary interface, `uplinkInterfaces[0]`, set `vppDriver` to "virtio".
-- Also ensure that your vpp config has no `dpdk` stanza and that the dpdk plugin is disabled.
-
-Optionally, if you would like to set the number or size of **rx** queues, refer to the **UplinkInterfaceSpec** sub-section
-of [Getting Started](../../getting-started/kubernetes/vpp/getting-started.mdx).
-
-## Using the native AVF driver
-
-You can use this driver if your primary interface is supported by AVF (`realpath /sys/bus/pci/devices/<pci-id>/driver` gives `.../i40e`):
-
-- Ensure `vfio-pci` is loaded (`sudo modprobe vfio-pci`).
-- For the primary interface, `uplinkInterfaces[0]`, set `vppDriver` to "avf".
-- Also ensure that your vpp config has no `dpdk` stanza and that the dpdk plugin is disabled.
-
-Optionally, if you would like to set the number or size of **rx** queues, refer to the **UplinkInterfaceSpec** sub-section
-of [Getting Started](../../getting-started/kubernetes/vpp/getting-started.mdx).
-
-## Using AF_XDP
-
-:::caution
-
-Ensure your kernel is at least `5.4` (check with `uname -r`).
-
-:::
-
-- For the primary interface, `uplinkInterfaces[0]`, set `vppDriver` to "af_xdp".
-- Also ensure that your vpp config has no `dpdk` stanza and that the dpdk plugin is disabled.
-- Finally, `FELIX_XDPENABLED` should be set to `false` on the `calico-node` container, otherwise Felix will periodically clean up the VPP configuration:
-
-```yaml
----
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
-  name: calico-node
-  namespace: kube-system
-spec:
-  template:
-    spec:
-      containers:
-        - name: calico-node
-          env:
-            - name: FELIX_XDPENABLED
-              value: 'false'
-```
-
-With kustomize, use `kubectl kustomize ./yaml/overlays/af-xdp | kubectl apply -f -`.
-
-Optionally, if you would like to set the number or size of **rx** queues, or to customize whether we busy-poll the interface (`polling`),
-only use interrupts to wake us up (`interrupt`), or switch between both depending on the load (`adaptive`), refer to the **UplinkInterfaceSpec** sub-section
-of [Getting Started](../../getting-started/kubernetes/vpp/getting-started.mdx); a sketch follows below.
-
-#### Side notes
-
-- AF_XDP won't start if you set `buffers { buffers-per-numa }` too big (65536 should work).
-
-## Using AF_PACKET
-
-- For the primary interface, `uplinkInterfaces[0]`, set `vppDriver` to "af_packet".
-- Also ensure that your vpp config has no `dpdk` stanza and that the dpdk plugin is disabled.
-
-You can also use `kubectl kustomize ./yaml/overlays/af-packet | kubectl apply -f -`.
-
-## Using DPDK
-
-- Ensure you have hugepages available on your system (`sysctl -w vm.nr_hugepages=512`).
-- For the primary interface, `uplinkInterfaces[0]`, set `vppDriver` to "dpdk".
-
-## Using native drivers with vpp's CLI
-
-This is a rather advanced/experimental setup. We'll take the AVF driver as the example here, using the VPP CLI, but any VPP driver can be used.
-This allows other interface types to be supported efficiently.
-
-- For the primary interface, `uplinkInterfaces[0]`, set `vppDriver` to "none".
-- Ensure that your vpp config has no `dpdk` stanza and that the dpdk plugin is disabled.
-- Lastly, add an `exec /etc/vpp/startup.exec` entry in `unix { .. }`:
-
-```yaml
-vpp_config_template: |-
-  unix {
-    nodaemon
-    full-coredump
-    log /var/run/vpp/vpp.log
-    cli-listen /var/run/vpp/cli.sock
-    exec /etc/vpp/startup.exec
-  }
-  ...
-  # removed dpdk { ... }
-  ...
-  plugins {
-    plugin default { enable }
-    plugin calico_plugin.so { enable }
-    plugin dpdk_plugin.so { disable }
-  }
-```
-
-Then update the `CALICOVPP_CONFIG_EXEC_TEMPLATE` environment variable to pass the interface creation CLI command(s):
-
-```yaml
-kind: DaemonSet
-apiVersion: apps/v1
-metadata:
-  name: calico-vpp-node
-  namespace: calico-vpp-dataplane
-spec:
-  template:
-    spec:
-      containers:
-        - name: vpp
-          env:
-            - name: CALICOVPP_CONFIG_EXEC_TEMPLATE
-              value: 'create interface avf 0000:ab:cd.1 num-rx-queues 1'
-```
-
-In the specific case of the AVF driver, the PCI id must belong to a VF that can be created with the `avf.sh` [script](https://github.com/projectcalico/vpp-dataplane/blob/{{vppbranch}}/test/scripts/utils/avf.sh). Different drivers will have different requirements.
diff --git a/calico_versioned_docs/version-3.25/release-notes/index.mdx b/calico_versioned_docs/version-3.25/release-notes/index.mdx
deleted file mode 100644
index 98b69ca6d0..0000000000
--- a/calico_versioned_docs/version-3.25/release-notes/index.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
----
-description: What's new, and why features provide value for upgrading.
-title: Release notes ---- - -import ReleaseNotes from '@site/calico_versioned_docs/version-3.25/_includes/components/ReleaseNotes'; - -# Release notes - - diff --git a/calico_versioned_docs/version-3.25/releases.json b/calico_versioned_docs/version-3.25/releases.json deleted file mode 100644 index fbe3883346..0000000000 --- a/calico_versioned_docs/version-3.25/releases.json +++ /dev/null @@ -1,158 +0,0 @@ -[ - { - "title": "v3.25.2", - "tigera-operator": { - "image": "tigera/operator", - "registry": "quay.io", - "version": "v1.29.6" - }, - "components": { - "typha": { - "version": "v3.25.2" - }, - "calicoctl": { - "version": "v3.25.2" - }, - "calico/node": { - "version": "v3.25.2" - }, - "calico/cni": { - "version": "v3.25.2" - }, - "calico/apiserver": { - "version": "v3.25.2" - }, - "calico/kube-controllers": { - "version": "v3.25.2" - }, - "calico/flannel-migration-controller": { - "version": "v3.25.2" - }, - "calico/windows": { - "version": "v3.25.2" - }, - "networking-calico": { - "version": "v3.25.2" - }, - "flannel": { - "version": "v0.16.3" - }, - "calico/dikastes": { - "version": "v3.25.2" - }, - "flexvol": { - "version": "v3.25.2" - }, - "csi-driver": { - "version": "v3.25.2" - }, - "csi-node-driver-registrar": { - "version": "v3.25.2" - } - } - }, - { - "title": "v3.25.1", - "tigera-operator": { - "image": "tigera/operator", - "registry": "quay.io", - "version": "v1.29.3" - }, - "components": { - "typha": { - "version": "v3.25.1" - }, - "calicoctl": { - "version": "v3.25.1" - }, - "calico/node": { - "version": "v3.25.1" - }, - "calico/cni": { - "version": "v3.25.1" - }, - "calico/apiserver": { - "version": "v3.25.1" - }, - "calico/kube-controllers": { - "version": "v3.25.1" - }, - "calico/flannel-migration-controller": { - "version": "v3.25.1" - }, - "calico/windows": { - "version": "v3.25.1" - }, - "networking-calico": { - "version": "v3.25.1" - }, - "flannel": { - "version": "v0.16.3" - }, - "calico/dikastes": { - "version": "v3.25.1" - }, - "flexvol": { - "version": "v3.25.1" - }, - "csi-driver": { - "version": "v3.25.1" - }, - "csi-node-driver-registrar": { - "version": "v3.25.1" - } - } - }, - { - "title": "v3.25.0", - "tigera-operator": { - "image": "tigera/operator", - "registry": "quay.io", - "version": "v1.29.0" - }, - "components": { - "typha": { - "version": "v3.25.0" - }, - "calicoctl": { - "version": "v3.25.0" - }, - "calico/node": { - "version": "v3.25.0" - }, - "calico/cni": { - "version": "v3.25.0" - }, - "calico/apiserver": { - "version": "v3.25.0" - }, - "calico/kube-controllers": { - "version": "v3.25.0" - }, - "calico/flannel-migration-controller": { - "version": "v3.25.0" - }, - "calico/windows": { - "version": "v3.25.0" - }, - "networking-calico": { - "version": "v3.25.0" - }, - "flannel": { - "version": "v0.16.3" - }, - "calico/dikastes": { - "version": "v3.25.0" - }, - "flexvol": { - "version": "v3.25.0" - }, - "csi-driver": { - "version": "v3.25.0" - }, - "csi-node-driver-registrar": { - "version": "v3.25.0" - } - } - } -] diff --git a/calico_versioned_docs/version-3.25/training/index.mdx b/calico_versioned_docs/version-3.25/training/index.mdx deleted file mode 100644 index 899c659d64..0000000000 --- a/calico_versioned_docs/version-3.25/training/index.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -description: Links to Calico resources for onboarding and training. 
---- - -# Training and resources - -| Resource | URL | -| --------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| Get a Calico cluster up and running in 15 minutes | [Quickstart](../getting-started/kubernetes/quickstart.mdx) | -| eBook: Introduction to Kubernetes Networking and Security | [Download ebook](https://www.tigera.io/lp/kubernetes-networking-ebook/) | -| Certified Calico Operator training | - [Level 1](https://academy.tigera.io/course/certified-calico-operator-level-1/)
    - [AWS Expert](https://academy.tigera.io/course/certified-calico-operator-aws-expert/)
    - [eBPF](https://academy.tigera.io/course/certified-calico-operator-ebpf/)
    - [Azure](https://academy.tigera.io/course/certified-calico-operator-azure-expert/) | -| Workshops and events | - [Workshops and events](https://www.tigera.io/events/)
    - [Tradeshows](https://www.tigera.io/lp/tradeshows/) | -| Videos, datasheets | [Videos, case studies, datasheets, etc.](https://www.tigera.io/resources/) | -| Blog | [Technical blog](https://www.tigera.io/blog/) | -| Stay connected | - [GitHub](https://github.com/projectcalico/calico)
    - [Calico YouTube channel](https://www.youtube.com/channel/UCFpTnXDNcBoXI4gqCDmegFA)
    - [Security bulletin of vulnerabilities](https://www.tigera.io/security-bulletins/)
    - [Twitter](https://twitter.com/projectcalico) | -| Report a bug | [Create a new issue](https://github.com/projectcalico/calico/issues) | diff --git a/calico_versioned_docs/version-3.25/variables.js b/calico_versioned_docs/version-3.25/variables.js deleted file mode 100644 index b30864b103..0000000000 --- a/calico_versioned_docs/version-3.25/variables.js +++ /dev/null @@ -1,43 +0,0 @@ -const releases = require('./releases.json'); - -const variables = { - releaseTitle: 'v3.25.2', - prodname: 'Calico', - prodnamedash: 'calico', - version: 'v3.25', - baseUrl: '/calico/3.25', - filesUrl: 'https://projectcalico.docs.tigera.io/v3.25', - tutorialFilesURL: 'https://docs.tigera.io/files', - calicoReleasesURL: 'https://github.com/projectcalico/calico/releases/download', - tmpScriptsURL: 'https://docs.tigera.io/calico/3.25', - prodnameWindows: 'Calico for Windows', - nodecontainer: 'calico/node', - noderunning: 'calico-node', - rootDirWindows: 'C:\\CalicoWindows', - ppa_repo_name: 'calico-3.25', - manifestsUrl: 'https://raw.githubusercontent.com/projectcalico/calico/v3.25.2', - releases, - registry: '', - vppbranch: 'v3.25.1', - tigeraOperator: releases[0]['tigera-operator'], - tigeraOperatorVersionShort: releases[0]['tigera-operator'].version.split('.').slice(0, 2).join('.'), - imageNames: { - 'calico/node': 'calico/node', - calicoctl: 'calico/ctl', - typha: 'calico/typha', - 'calico/cni': 'calico/cni', - 'calico/apiserver': 'calico/apiserver', - 'calico/kube-controllers': 'calico/kube-controllers', - 'calico-upgrade': 'calico-upgrade', - 'calico/windows': 'calico/windows', - flannel: 'docker.io/flannelcni/flannel', - flannelMigration: 'calico/flannel-migration-controller', - 'calico/dikastes': 'calico/dikastes', - 'pilot-webhook': 'calico/pilot-webhook', - flexvol: 'calico/pod2daemon-flexvol', - 'csi-driver': 'calico/csi', - 'csi-node-driver-registrar': 'calico/node-driver-registrar', - }, -}; - -module.exports = variables; diff --git a/calico_versioned_sidebars/version-3.25-sidebars.json b/calico_versioned_sidebars/version-3.25-sidebars.json deleted file mode 100644 index e9a4de0440..0000000000 --- a/calico_versioned_sidebars/version-3.25-sidebars.json +++ /dev/null @@ -1,802 +0,0 @@ -{ - "calicoSidebar": [ - { - "type": "category", - "label": "About", - "link": { - "type": "doc", - "id": "about/index" - }, - "items": [ - "about/about-k8s-networking", - "about/about-network-policy", - "about/about-kubernetes-services", - "about/about-kubernetes-ingress", - "about/about-kubernetes-egress", - "about/about-ebpf" - ] - }, - { - "type": "category", - "label": "Install Calico", - "link": { - "type": "doc", - "id": "getting-started/index" - }, - "items": [ - { - "type": "category", - "label": "Kubernetes", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/index" - }, - "items": [ - "getting-started/kubernetes/quickstart", - { - "type": "category", - "label": "Managed public cloud", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/managed-public-cloud/index" - }, - "items": [ - "getting-started/kubernetes/managed-public-cloud/eks", - "getting-started/kubernetes/managed-public-cloud/gke", - "getting-started/kubernetes/managed-public-cloud/iks", - "getting-started/kubernetes/managed-public-cloud/aks" - ] - }, - { - "type": "category", - "label": "Self-managed public cloud", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/self-managed-public-cloud/index" - }, - "items": [ - "getting-started/kubernetes/self-managed-public-cloud/aws", - 
"getting-started/kubernetes/self-managed-public-cloud/gce", - "getting-started/kubernetes/self-managed-public-cloud/azure", - "getting-started/kubernetes/self-managed-public-cloud/do" - ] - }, - { - "type": "category", - "label": "Self-managed on-premises", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/self-managed-onprem/index" - }, - "items": [ - "getting-started/kubernetes/self-managed-onprem/onpremises", - "getting-started/kubernetes/self-managed-onprem/config-options" - ] - }, - { - "type": "category", - "label": "OpenShift", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/openshift/index" - }, - "items": [ - "getting-started/kubernetes/openshift/requirements", - "getting-started/kubernetes/openshift/installation" - ] - }, - "getting-started/kubernetes/rancher", - { - "type": "category", - "label": "Flannel", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/flannel/index" - }, - "items": [ - "getting-started/kubernetes/flannel/install-for-flannel", - "getting-started/kubernetes/flannel/migration-from-flannel" - ] - }, - { - "type": "category", - "label": "Calico for Windows", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/windows-calico/index" - }, - "items": [ - "getting-started/kubernetes/windows-calico/limitations", - "getting-started/kubernetes/windows-calico/quickstart", - "getting-started/kubernetes/windows-calico/demo", - { - "type": "category", - "label": "Kubernetes", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/windows-calico/kubernetes/index" - }, - "items": [ - "getting-started/kubernetes/windows-calico/kubernetes/requirements", - "getting-started/kubernetes/windows-calico/kubernetes/standard", - "getting-started/kubernetes/windows-calico/kubernetes/rancher" - ] - }, - "getting-started/kubernetes/windows-calico/openshift-installation", - "getting-started/kubernetes/windows-calico/kubeconfig", - "getting-started/kubernetes/windows-calico/maintain", - "getting-started/kubernetes/windows-calico/troubleshoot" - ] - }, - { - "type": "category", - "label": "K3s", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/k3s/index" - }, - "items": [ - "getting-started/kubernetes/k3s/quickstart", - "getting-started/kubernetes/k3s/multi-node-install" - ] - }, - "getting-started/kubernetes/helm", - "getting-started/kubernetes/microk8s", - "getting-started/kubernetes/minikube", - { - "type": "category", - "label": "Calico the hard way", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/hardway/index" - }, - "items": [ - "getting-started/kubernetes/hardway/overview", - "getting-started/kubernetes/hardway/standing-up-kubernetes", - "getting-started/kubernetes/hardway/the-calico-datastore", - "getting-started/kubernetes/hardway/configure-ip-pools", - "getting-started/kubernetes/hardway/install-cni-plugin", - "getting-started/kubernetes/hardway/install-typha", - "getting-started/kubernetes/hardway/install-node", - "getting-started/kubernetes/hardway/configure-bgp-peering", - "getting-started/kubernetes/hardway/test-networking", - "getting-started/kubernetes/hardway/test-network-policy", - "getting-started/kubernetes/hardway/end-user-rbac", - "getting-started/kubernetes/hardway/istio-integration" - ] - }, - "getting-started/kubernetes/requirements", - { - "type": "category", - "label": "VPP dataplane", - "link": { - "type": "doc", - "id": "getting-started/kubernetes/vpp/index" - }, - "items": [ - "getting-started/kubernetes/vpp/getting-started", - 
"getting-started/kubernetes/vpp/ipsec", - "getting-started/kubernetes/vpp/specifics" - ] - } - ] - }, - { - "type": "category", - "label": "OpenStack", - "link": { - "type": "doc", - "id": "getting-started/openstack/index" - }, - "items": [ - "getting-started/openstack/overview", - "getting-started/openstack/requirements", - { - "type": "category", - "label": "Installation", - "link": { - "type": "doc", - "id": "getting-started/openstack/installation/index" - }, - "items": [ - "getting-started/openstack/installation/overview", - "getting-started/openstack/installation/ubuntu", - "getting-started/openstack/installation/redhat", - "getting-started/openstack/installation/devstack", - "getting-started/openstack/installation/verification" - ] - } - ] - }, - { - "type": "category", - "label": "Non-cluster hosts", - "link": { - "type": "doc", - "id": "getting-started/bare-metal/index" - }, - "items": [ - "getting-started/bare-metal/about", - "getting-started/bare-metal/requirements", - { - "type": "category", - "label": "Installation", - "link": { - "type": "doc", - "id": "getting-started/bare-metal/installation/index" - }, - "items": [ - "getting-started/bare-metal/installation/container", - "getting-started/bare-metal/installation/binary-mgr", - "getting-started/bare-metal/installation/binary" - ] - } - ] - } - ] - }, - { - "type": "category", - "label": "Networking", - "link": { - "type": "doc", - "id": "networking/index" - }, - "items": [ - "networking/determine-best-networking", - { - "type": "category", - "label": "Configure networking", - "link": { - "type": "doc", - "id": "networking/configuring/index" - }, - "items": [ - "networking/configuring/bgp", - "networking/configuring/vxlan-ipip", - "networking/configuring/advertise-service-ips", - "networking/configuring/mtu", - "networking/configuring/workloads-outside-cluster", - "networking/configuring/use-ipvs", - "networking/configuring/sidecar-acceleration", - "networking/configuring/pod-mac-address" - ] - }, - { - "type": "category", - "label": "Customize IP address management", - "link": { - "type": "doc", - "id": "networking/ipam/index" - }, - "items": [ - "networking/ipam/get-started-ip-addresses", - "networking/ipam/ip-autodetection", - "networking/ipam/ipv6", - "networking/ipam/ipv6-control-plane", - "networking/ipam/add-floating-ip", - "networking/ipam/use-specific-ip", - "networking/ipam/assign-ip-addresses-topology", - "networking/ipam/migrate-pools", - "networking/ipam/change-block-size", - "networking/ipam/legacy-firewalls" - ] - }, - { - "type": "category", - "label": "Calico networking for OpenStack", - "link": { - "type": "doc", - "id": "networking/openstack/index" - }, - "items": [ - "networking/openstack/dev-machine-setup", - "networking/openstack/ipv6", - "networking/openstack/connectivity", - "networking/openstack/labels", - "networking/openstack/configuration", - "networking/openstack/semantics", - "networking/openstack/floating-ips", - "networking/openstack/service-ips", - "networking/openstack/host-routes", - "networking/openstack/multiple-regions", - "networking/openstack/kuryr", - "networking/openstack/neutron-api" - ] - } - ] - }, - { - "type": "category", - "label": "Network policy", - "link": { - "type": "doc", - "id": "network-policy/index" - }, - "items": [ - "network-policy/adopt-zero-trust", - "network-policy/non-privileged", - { - "type": "category", - "label": "Get started with policy", - "link": { - "type": "doc", - "id": "network-policy/get-started/index" - }, - "items": [ - { - "type": "category", - 
"label": "Calico policy", - "link": { - "type": "doc", - "id": "network-policy/get-started/calico-policy/index" - }, - "items": [ - "network-policy/get-started/calico-policy/calico-network-policy", - "network-policy/get-started/calico-policy/calico-labels", - "network-policy/get-started/calico-policy/network-policy-openstack", - "network-policy/get-started/calico-policy/calico-policy-tutorial" - ] - }, - { - "type": "category", - "label": "Kubernetes policy", - "link": { - "type": "doc", - "id": "network-policy/get-started/kubernetes-policy/index" - }, - "items": [ - "network-policy/get-started/kubernetes-policy/kubernetes-network-policy", - "network-policy/get-started/kubernetes-policy/kubernetes-demo", - "network-policy/get-started/kubernetes-policy/kubernetes-policy-basic", - "network-policy/get-started/kubernetes-policy/kubernetes-policy-advanced" - ] - }, - "network-policy/get-started/kubernetes-default-deny" - ] - }, - { - "type": "category", - "label": "Policy rules", - "link": { - "type": "doc", - "id": "network-policy/policy-rules/index" - }, - "items": [ - "network-policy/policy-rules/policy-rules-overview", - "network-policy/policy-rules/namespace-policy", - "network-policy/policy-rules/service-policy", - "network-policy/policy-rules/service-accounts", - "network-policy/policy-rules/external-ips-policy", - "network-policy/policy-rules/icmp-ping" - ] - }, - { - "type": "category", - "label": "Policy for hosts", - "link": { - "type": "doc", - "id": "network-policy/hosts/index" - }, - "items": [ - "network-policy/hosts/protect-hosts", - "network-policy/hosts/kubernetes-nodes", - "network-policy/hosts/protect-hosts-tutorial", - "network-policy/hosts/host-forwarded-traffic" - ] - }, - { - "type": "category", - "label": "Policy for services", - "link": { - "type": "doc", - "id": "network-policy/services/index" - }, - "items": [ - "network-policy/services/kubernetes-node-ports", - "network-policy/services/services-cluster-ips" - ] - }, - { - "type": "category", - "label": "Policy for Istio", - "link": { - "type": "doc", - "id": "network-policy/istio/index" - }, - "items": [ - "network-policy/istio/app-layer-policy", - "network-policy/istio/http-methods", - "network-policy/istio/enforce-policy-istio" - ] - }, - { - "type": "category", - "label": "Policy for extreme traffic", - "link": { - "type": "doc", - "id": "network-policy/extreme-traffic/index" - }, - "items": [ - "network-policy/extreme-traffic/high-connection-workloads", - "network-policy/extreme-traffic/defend-dos-attack" - ] - }, - "network-policy/encrypt-cluster-pod-traffic", - { - "type": "category", - "label": "Secure Calico component communications", - "link": { - "type": "doc", - "id": "network-policy/comms/index" - }, - "items": [ - "network-policy/comms/crypto-auth", - "network-policy/comms/reduce-nodes", - "network-policy/comms/secure-metrics", - "network-policy/comms/secure-bgp" - ] - } - ] - }, - { - "type": "category", - "label": "Operations", - "link": { - "type": "doc", - "id": "operations/index" - }, - "items": [ - { - "type": "category", - "label": "Upgrade", - "link": { - "type": "doc", - "id": "operations/upgrading/index" - }, - "items": [ - "operations/upgrading/kubernetes-upgrade", - "operations/upgrading/openshift-upgrade", - "operations/upgrading/openstack-upgrade" - ] - }, - { - "type": "category", - "label": "calicoctl", - "link": { - "type": "doc", - "id": "operations/calicoctl/index" - }, - "items": [ - "operations/calicoctl/install", - { - "type": "category", - "label": "Configure calicoctl", - "link": 
{ - "type": "doc", - "id": "operations/calicoctl/configure/index" - }, - "items": [ - "operations/calicoctl/configure/overview", - "operations/calicoctl/configure/etcd", - "operations/calicoctl/configure/kdd" - ] - } - ] - }, - { - "type": "category", - "label": "Deploy image options", - "link": { - "type": "doc", - "id": "operations/image-options/index" - }, - "items": [ - "operations/image-options/imageset", - "operations/image-options/alternate-registry" - ] - }, - "operations/datastore-migration", - "operations/operator-migration", - "operations/install-apiserver", - { - "type": "category", - "label": "eBPF", - "link": { - "type": "doc", - "id": "operations/ebpf/index" - }, - "items": [ - "operations/ebpf/use-cases-ebpf", - "operations/ebpf/enabling-ebpf", - "operations/ebpf/install", - "operations/ebpf/troubleshoot-ebpf" - ] - }, - { - "type": "category", - "label": "Monitor", - "link": { - "type": "doc", - "id": "operations/monitor/index" - }, - "items": [ - "operations/monitor/monitor-component-metrics", - "operations/monitor/monitor-component-visual" - ] - }, - "operations/decommissioning-a-node", - "operations/fips", - { - "type": "category", - "label": "Troubleshoot", - "link": { - "type": "doc", - "id": "operations/troubleshoot/index" - }, - "items": [ - "operations/troubleshoot/troubleshooting", - "operations/troubleshoot/commands", - "operations/troubleshoot/component-logs", - "operations/troubleshoot/vpp" - ] - }, - "operations/certificate-management" - ] - }, - { - "type": "category", - "label": "Reference", - "link": { - "type": "doc", - "id": "reference/index" - }, - "items": [ - "reference/api", - { - "type": "doc", - "id": "reference/installation/api", - "label": "Installation API" - }, - { - "type": "category", - "label": "calicoctl", - "link": { - "type": "doc", - "id": "reference/calicoctl/index" - }, - "items": [ - "reference/calicoctl/overview", - "reference/calicoctl/create", - "reference/calicoctl/replace", - "reference/calicoctl/apply", - "reference/calicoctl/delete", - "reference/calicoctl/get", - "reference/calicoctl/patch", - "reference/calicoctl/label", - "reference/calicoctl/convert", - { - "type": "category", - "label": "ipam", - "link": { - "type": "doc", - "id": "reference/calicoctl/ipam/index" - }, - "items": [ - "reference/calicoctl/ipam/overview", - "reference/calicoctl/ipam/check", - "reference/calicoctl/ipam/release", - "reference/calicoctl/ipam/show", - "reference/calicoctl/ipam/configure", - "reference/calicoctl/ipam/split" - ] - }, - { - "type": "category", - "label": "node", - "link": { - "type": "doc", - "id": "reference/calicoctl/node/index" - }, - "items": [ - "reference/calicoctl/node/overview", - "reference/calicoctl/node/run", - "reference/calicoctl/node/status", - "reference/calicoctl/node/diags", - "reference/calicoctl/node/checksystem" - ] - }, - { - "type": "category", - "label": "datastore", - "link": { - "type": "doc", - "id": "reference/calicoctl/datastore/index" - }, - "items": [ - "reference/calicoctl/datastore/overview", - { - "type": "category", - "label": "migrate", - "link": { - "type": "doc", - "id": "reference/calicoctl/datastore/migrate/index" - }, - "items": [ - "reference/calicoctl/datastore/migrate/overview", - "reference/calicoctl/datastore/migrate/export", - "reference/calicoctl/datastore/migrate/import", - "reference/calicoctl/datastore/migrate/lock", - "reference/calicoctl/datastore/migrate/unlock" - ] - } - ] - }, - "reference/calicoctl/version" - ] - }, - { - "type": "category", - "label": "Resource definitions", - 
"link": { - "type": "doc", - "id": "reference/resources/index" - }, - "items": [ - "reference/resources/overview", - "reference/resources/bgpconfig", - "reference/resources/bgppeer", - "reference/resources/blockaffinity", - "reference/resources/caliconodestatus", - "reference/resources/felixconfig", - "reference/resources/globalnetworkpolicy", - "reference/resources/globalnetworkset", - "reference/resources/hostendpoint", - "reference/resources/ippool", - "reference/resources/ipreservation", - "reference/resources/ipamconfig", - "reference/resources/kubecontrollersconfig", - "reference/resources/networkpolicy", - "reference/resources/networkset", - "reference/resources/node", - "reference/resources/profile", - "reference/resources/workloadendpoint" - ] - }, - { - "type": "category", - "label": "Configuring etcd RBAC", - "link": { - "type": "doc", - "id": "reference/etcd-rbac/index" - }, - "items": [ - "reference/etcd-rbac/overview", - "reference/etcd-rbac/certificate-generation", - "reference/etcd-rbac/users-and-roles", - "reference/etcd-rbac/kubernetes", - "reference/etcd-rbac/kubernetes-advanced", - "reference/etcd-rbac/calico-etcdv3-paths" - ] - }, - "reference/configure-calico-node", - { - "type": "category", - "label": "Felix", - "link": { - "type": "doc", - "id": "reference/felix/index" - }, - "items": [ - "reference/felix/configuration", - "reference/felix/prometheus" - ] - }, - { - "type": "category", - "label": "Typha", - "link": { - "type": "doc", - "id": "reference/typha/index" - }, - "items": [ - "reference/typha/overview", - "reference/typha/configuration", - "reference/typha/prometheus" - ] - }, - "reference/configure-cni-plugins", - { - "type": "category", - "label": "Calico Kubernetes controllers", - "link": { - "type": "doc", - "id": "reference/kube-controllers/index" - }, - "items": [ - "reference/kube-controllers/configuration", - "reference/kube-controllers/prometheus" - ] - }, - { - "type": "category", - "label": "Configuration on public clouds", - "link": { - "type": "doc", - "id": "reference/public-cloud/index" - }, - "items": [ - "reference/public-cloud/aws", - "reference/public-cloud/azure", - "reference/public-cloud/gce", - "reference/public-cloud/ibm" - ] - }, - { - "type": "category", - "label": "Host endpoints", - "link": { - "type": "doc", - "id": "reference/host-endpoints/index" - }, - "items": [ - "reference/host-endpoints/overview", - "reference/host-endpoints/connectivity", - "reference/host-endpoints/objects", - "reference/host-endpoints/selector", - "reference/host-endpoints/failsafe", - "reference/host-endpoints/pre-dnat", - "reference/host-endpoints/forwarded", - "reference/host-endpoints/summary", - "reference/host-endpoints/conntrack" - ] - }, - { - "type": "category", - "label": "Architecture", - "link": { - "type": "doc", - "id": "reference/architecture/index" - }, - "items": [ - "reference/architecture/overview", - "reference/architecture/data-path", - { - "type": "category", - "label": "Network design", - "link": { - "type": "doc", - "id": "reference/architecture/design/index" - }, - "items": [ - "reference/architecture/design/l2-interconnect-fabric", - "reference/architecture/design/l3-interconnect-fabric" - ] - } - ] - }, - { - "type": "category", - "label": "VPP dataplane", - "link": { - "type": "doc", - "id": "reference/vpp/index" - }, - "items": [ - "reference/vpp/uplink-configuration", - "reference/vpp/technical-details", - "reference/vpp/host-network" - ] - }, - "reference/faq", - "reference/involved", - { - "type": "link", - "label": 
"Attributions", - "href": "pathname:///calico/3.25/licenses/third-party-attributions.html" - } - ] - }, - "release-notes/index" - ] -} diff --git a/docusaurus.config.js b/docusaurus.config.js index 8a5b366175..4b3b72f105 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -369,7 +369,7 @@ const config = { path: 'calico', routeBasePath: 'calico', editCurrentVersion: true, - onlyIncludeVersions: ['3.28','3.27','3.26','3.25'], + onlyIncludeVersions: ['3.28','3.27','3.26'], versions: { current: { label: 'Next', @@ -391,11 +391,6 @@ const config = { path: '3.26', banner: 'none', }, - 3.25: { - label: '3.25', - path: '3.25', - banner: 'none', - }, }, sidebarPath: require.resolve('./sidebars-calico.js'), beforeDefaultRemarkPlugins: [variablesPlugin, componentImagePlugin], diff --git a/releases.html b/releases.html deleted file mode 100644 index 73f7471bcf..0000000000 --- a/releases.html +++ /dev/null @@ -1,77 +0,0 @@ - - - diff --git a/src/pages/archive.md b/src/pages/archive.md index 0bf58d9f7c..bad1c155e1 100644 --- a/src/pages/archive.md +++ b/src/pages/archive.md @@ -9,7 +9,7 @@ description: Links to all versions of product documentation for Calico, Calico E * [Calico 3.27](https://docs.tigera.io/calico/3.27/about) * [Calico 3.26](https://docs.tigera.io/calico/3.26/about) -* [Calico 3.25](https://docs.tigera.io/calico/3.25/) +* [Calico 3.25](https://archive-os-3-25.netlify.app/calico/3.25/) * [Calico 3.24](https://archive-os-3-24.netlify.app/calico/3.24/) * [Calico 3.23](https://docs.tigera.io/archive/v3.23) * [Calico 3.22](https://docs.tigera.io/archive/v3.22)